diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 7a6bb722d986..aff975642a72 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -17,7 +17,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
- go-version: 1.19
+ go-version: '>=1.20.0'
- uses: actions/checkout@v2
with:
diff --git a/builder/Dockerfile b/builder/Dockerfile
index 6f7bf98a9d9e..cb5802480182 100644
--- a/builder/Dockerfile
+++ b/builder/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM golang:1.19
+FROM golang:1.20
LABEL maintainer="Marcin Wielgus "
ENV GOPATH /gopath/
diff --git a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go
index 05ba69c5514e..1b1e64925c5f 100644
--- a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go
+++ b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go
@@ -18,17 +18,18 @@ package aws
import (
"fmt"
- "gopkg.in/gcfg.v1"
"io"
+ "os"
+ "strconv"
+ "strings"
+
+ "gopkg.in/gcfg.v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/ec2metadata"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/endpoints"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
+ provider_aws "k8s.io/cloud-provider-aws/pkg/providers/v1"
"k8s.io/klog/v2"
- provider_aws "k8s.io/legacy-cloud-providers/aws"
- "os"
- "strconv"
- "strings"
)
// createAWSSDKProvider
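Context for the hunk above: the diff swaps the cloud-config provider import from k8s.io/legacy-cloud-providers/aws to k8s.io/cloud-provider-aws/pkg/providers/v1 while keeping gcfg-based parsing. A minimal sketch of how the new provider_aws import can be consumed, assuming the package's exported CloudConfig struct; the readAWSCloudConfig helper name is illustrative only and not taken from this diff:

package aws

import (
	"io"

	"gopkg.in/gcfg.v1"
	provider_aws "k8s.io/cloud-provider-aws/pkg/providers/v1"
)

// readAWSCloudConfig parses an optional cloud config stream (e.g. the file
// passed via --cloud-config) into the CloudConfig struct that now comes from
// cloud-provider-aws instead of legacy-cloud-providers.
func readAWSCloudConfig(config io.Reader) (*provider_aws.CloudConfig, error) {
	cfg := &provider_aws.CloudConfig{}
	if config != nil {
		if err := gcfg.ReadInto(cfg, config); err != nil {
			return nil, err
		}
	}
	return cfg, nil
}

Because only the import path changes in the hunk above, the sketch assumes the exported CloudConfig shape is compatible between the old and new packages.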
diff --git a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider_test.go b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider_test.go
index 420d32e28911..1e45343f3538 100644
--- a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider_test.go
+++ b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider_test.go
@@ -23,7 +23,7 @@ import (
"io"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/ec2metadata"
- provider_aws "k8s.io/legacy-cloud-providers/aws"
+ provider_aws "k8s.io/cloud-provider-aws/pkg/providers/v1"
"net/http"
"net/http/httptest"
"os"
diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 6a6bf460c72f..9db45514a0bb 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -1,9 +1,9 @@
module k8s.io/autoscaler/cluster-autoscaler
-go 1.19
+go 1.20
require (
- cloud.google.com/go v0.97.0
+ cloud.google.com/go/compute/metadata v0.2.3
github.com/Azure/azure-sdk-for-go v67.2.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/adal v0.9.21
@@ -11,7 +11,7 @@ require (
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/Azure/skewer v0.0.14
- github.com/aws/aws-sdk-go v1.44.147
+ github.com/aws/aws-sdk-go v1.44.241
github.com/digitalocean/godo v1.27.0
github.com/ghodss/yaml v1.0.0
github.com/gofrs/uuid v4.0.0+incompatible
@@ -26,30 +26,32 @@ require (
github.com/prometheus/client_golang v1.14.0
github.com/satori/go.uuid v1.2.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.8.1
- golang.org/x/crypto v0.5.0
- golang.org/x/net v0.5.0
- golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
- google.golang.org/api v0.60.0
- google.golang.org/grpc v1.51.0
- google.golang.org/protobuf v1.28.1
- gopkg.in/gcfg.v1 v1.2.0
+ github.com/stretchr/testify v1.8.2
+ golang.org/x/crypto v0.8.0
+ golang.org/x/net v0.9.0
+ golang.org/x/oauth2 v0.7.0
+ google.golang.org/api v0.114.0
+ google.golang.org/grpc v1.54.0
+ google.golang.org/protobuf v1.30.0
+ gopkg.in/gcfg.v1 v1.2.3
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.27.0-alpha.1
- k8s.io/apimachinery v0.27.0-alpha.1
- k8s.io/apiserver v0.27.0-alpha.1
- k8s.io/client-go v0.27.0-alpha.1
- k8s.io/cloud-provider v0.27.0-alpha.1
- k8s.io/component-base v0.27.0-alpha.1
- k8s.io/component-helpers v0.27.0-alpha.1
- k8s.io/klog/v2 v2.80.1
- k8s.io/kubernetes v1.27.0-alpha.1
+ k8s.io/api v0.27.0
+ k8s.io/apimachinery v0.27.0
+ k8s.io/apiserver v0.27.0
+ k8s.io/client-go v0.27.0
+ k8s.io/cloud-provider v0.27.0
+ k8s.io/cloud-provider-aws v1.27.0
+ k8s.io/component-base v0.27.0
+ k8s.io/component-helpers v0.27.0
+ k8s.io/klog/v2 v2.90.1
+ k8s.io/kubernetes v1.27.0
k8s.io/legacy-cloud-providers v0.0.0
- k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
+ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
sigs.k8s.io/cloud-provider-azure v1.26.2
)
require (
+ cloud.google.com/go/compute v1.19.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect
github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect
@@ -61,56 +63,59 @@ require (
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.25 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/cenkalti/backoff/v4 v4.1.3 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
github.com/cilium/ebpf v0.7.0 // indirect
github.com/container-storage-interface/spec v1.7.0 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
- github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.1 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/godbus/dbus/v5 v5.0.6 // indirect
- github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
+ github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.2 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
github.com/google/cadvisor v0.47.1 // indirect
- github.com/google/cel-go v0.12.6 // indirect
- github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/gofuzz v1.1.0 // indirect
- github.com/googleapis/gax-go/v2 v2.1.1 // indirect
+ github.com/google/cel-go v0.14.0 // indirect
+ github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+ github.com/googleapis/gax-go/v2 v2.7.1 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
- github.com/imdario/mergo v0.3.6 // indirect
- github.com/inconshreveable/mousetrap v1.0.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
+ github.com/imdario/mergo v0.3.15 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
github.com/libopenstorage/openstorage v1.0.0 // indirect
github.com/lithammer/dedent v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/ipvs v1.1.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
@@ -126,59 +131,61 @@ require (
github.com/opencontainers/selinux v1.10.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/prometheus/common v0.42.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
- github.com/spf13/cobra v1.6.1 // indirect
- github.com/stoewer/go-strcase v1.2.0 // indirect
+ github.com/spf13/cobra v1.7.0 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/vishvananda/netlink v1.1.0 // indirect
github.com/vishvananda/netns v0.0.2 // indirect
github.com/vmware/govmomi v0.30.0 // indirect
- go.etcd.io/etcd/api/v3 v3.5.5 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
- go.etcd.io/etcd/client/v3 v3.5.5 // indirect
- go.opencensus.io v0.23.0 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.7 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.7 // indirect
+ go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
- go.opentelemetry.io/otel v1.10.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
- go.opentelemetry.io/otel/metric v0.31.0 // indirect
- go.opentelemetry.io/otel/sdk v1.10.0 // indirect
- go.opentelemetry.io/otel/trace v1.10.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect
+ go.opentelemetry.io/otel v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect
+ go.opentelemetry.io/otel/metric v0.37.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.14.0 // indirect
+ go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- go.uber.org/zap v1.19.0 // indirect
+ go.uber.org/atomic v1.10.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.24.0 // indirect
+ golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
golang.org/x/sync v0.1.0 // indirect
- golang.org/x/sys v0.4.0 // indirect
- golang.org/x/term v0.4.0 // indirect
- golang.org/x/text v0.6.0 // indirect
- golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
+ golang.org/x/sys v0.7.0 // indirect
+ golang.org/x/term v0.7.0 // indirect
+ golang.org/x/text v0.9.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
- gopkg.in/warnings.v0 v0.1.1 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/controller-manager v0.27.0 // indirect
k8s.io/cri-api v0.0.0 // indirect
- k8s.io/csi-translation-lib v0.27.0-alpha.1 // indirect
+ k8s.io/csi-translation-lib v0.27.0 // indirect
k8s.io/dynamic-resource-allocation v0.0.0 // indirect
- k8s.io/kms v0.27.0-alpha.1 // indirect
- k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596 // indirect
+ k8s.io/kms v0.27.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c // indirect
k8s.io/kube-proxy v0.0.0 // indirect
k8s.io/kube-scheduler v0.0.0 // indirect
k8s.io/kubectl v0.0.0 // indirect
- k8s.io/kubelet v0.27.0-alpha.1 // indirect
+ k8s.io/kubelet v0.27.0 // indirect
k8s.io/mount-utils v0.26.0-alpha.0 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect
- sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
@@ -189,62 +196,62 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
-replace k8s.io/api => k8s.io/api v0.27.0-alpha.1
+replace k8s.io/api => k8s.io/api v0.27.0
-replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0-alpha.1
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.27.0-alpha.1
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.27.0
-replace k8s.io/apiserver => k8s.io/apiserver v0.27.0-alpha.1
+replace k8s.io/apiserver => k8s.io/apiserver v0.27.0
-replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0-alpha.1
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0
-replace k8s.io/client-go => k8s.io/client-go v0.27.0-alpha.1
+replace k8s.io/client-go => k8s.io/client-go v0.27.0
-replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0-alpha.1
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0
-replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0-alpha.1
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0
-replace k8s.io/code-generator => k8s.io/code-generator v0.27.0-alpha.1
+replace k8s.io/code-generator => k8s.io/code-generator v0.27.0
-replace k8s.io/component-base => k8s.io/component-base v0.27.0-alpha.1
+replace k8s.io/component-base => k8s.io/component-base v0.27.0
-replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.0-alpha.1
+replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.0
-replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.0-alpha.1
+replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.0
-replace k8s.io/cri-api => k8s.io/cri-api v0.27.0-alpha.1
+replace k8s.io/cri-api => k8s.io/cri-api v0.27.0
-replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0-alpha.1
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0
-replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0-alpha.1
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0
-replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0-alpha.1
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0
-replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0-alpha.1
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0
-replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0-alpha.1
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0
-replace k8s.io/kubectl => k8s.io/kubectl v0.27.0-alpha.1
+replace k8s.io/kubectl => k8s.io/kubectl v0.27.0
-replace k8s.io/kubelet => k8s.io/kubelet v0.27.0-alpha.1
+replace k8s.io/kubelet => k8s.io/kubelet v0.27.0
-replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0-alpha.1
+replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0
-replace k8s.io/metrics => k8s.io/metrics v0.27.0-alpha.1
+replace k8s.io/metrics => k8s.io/metrics v0.27.0
-replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.0-alpha.1
+replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.0
-replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0-alpha.1
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0
-replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0-alpha.1
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0
-replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.0-alpha.1
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.0
-replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0-alpha.1
+replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0
-replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0-alpha.1
+replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0
-replace k8s.io/kms => k8s.io/kms v0.27.0-alpha.1
+replace k8s.io/kms => k8s.io/kms v0.27.0
-replace k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0-alpha.1
+replace k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index 736f5ea8bff7..e8cabd4b8f2c 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -25,16 +25,21 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -84,7 +89,6 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/Azure/skewer v0.0.14 h1:0mzUJhspECkajYyynYsOCp//E2PSnYXrgP45bcskqfQ=
github.com/Azure/skewer v0.0.14/go.mod h1:6WTecuPyfGtuvS8Mh4JYWuHhO4kcWycGfsUBB+XTFG4=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b h1:Heo1J/ttaQFgGJSVnCZquy3e5eH5j1nqxBuomztB3P0=
@@ -105,17 +109,18 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 h1:X8MJ0fnN5FPdcGF5Ij2/OW+HgiJrRg3AfHAx1PJtIzM=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
-github.com/aws/aws-sdk-go v1.44.147 h1:C/YQv0QAvRHio4cESBTFGh8aI/JM9VdRislDIOz/Dx4=
-github.com/aws/aws-sdk-go v1.44.147/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.241 h1:D3KycZq3HjhmjYGzvTcmX/Ztf/KNmsfTmdDuKdnzZKo=
+github.com/aws/aws-sdk-go v1.44.241/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -124,14 +129,16 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -169,12 +176,13 @@ github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcD
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -206,8 +214,8 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
-github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -224,8 +232,8 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -247,18 +255,19 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
-github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
@@ -271,8 +280,9 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -308,18 +318,19 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/cadvisor v0.47.1 h1:YyKnRy/3myRNGOvF1bNF9FFnpjY7Gky5yKi/ZlN+BSo=
github.com/google/cadvisor v0.47.1/go.mod h1:iJdTjcjyKHjLCf7OSTzwP5GxdfrkPusw2x5bwGvuLUw=
-github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
-github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/cel-go v0.14.0 h1:LFobwuUDslWUHdQ48SXVXvQgPH2X1XVhsgOGNioAEZ4=
+github.com/google/cel-go v0.14.0/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -338,8 +349,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -357,16 +368,20 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -377,18 +392,19 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
+github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -431,13 +447,16 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -465,8 +484,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow=
-github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE=
+github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
+github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -492,7 +511,6 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
@@ -508,21 +526,23 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc=
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -547,15 +567,16 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -569,8 +590,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -585,6 +607,9 @@ github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXn
github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw=
github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA=
github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -596,60 +621,59 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0=
-go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
-go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8=
-go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
-go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
-go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI=
-go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=
-go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU=
-go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I=
-go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0=
+go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
+go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
+go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
+go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
+go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
+go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
+go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
+go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
+go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
+go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58=
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg=
-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
+go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
+go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs=
+go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
+go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
+go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -661,8 +685,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -673,6 +697,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -740,14 +766,15 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -766,8 +793,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -833,7 +861,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -857,14 +884,14 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -876,13 +903,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -900,7 +927,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -940,6 +966,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -973,8 +1000,9 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk=
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
+google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1014,7 +1042,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1042,8 +1069,10 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1070,11 +1099,11 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1090,8 +1119,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1099,15 +1129,15 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0=
-gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE=
-gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1120,7 +1150,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -1132,61 +1161,65 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.27.0-alpha.1 h1:4L3MEcje+LTMfkZrRaYOzA5a0MTvv3uqonblgQZ39E8=
-k8s.io/api v0.27.0-alpha.1/go.mod h1:pUUR9UVsje2ip8mF3GilkKhUt21HVrMB0x0Mzta5HQw=
-k8s.io/apimachinery v0.27.0-alpha.1 h1:q8VsOXO0tQsvrz6QSvolMvlciOFCRpCgQBD1lw/MHSo=
-k8s.io/apimachinery v0.27.0-alpha.1/go.mod h1:mb1AP2xs7Ajs+OvXRynwIgKVID9/rOtI7lFxIixvFp0=
-k8s.io/apiserver v0.27.0-alpha.1 h1:2bMQrLAKBkn+SSlB5zWjYPBuebh8/oi5aItXPOtNUdU=
-k8s.io/apiserver v0.27.0-alpha.1/go.mod h1:yUdhHGNuvU2CpeigdIu+cKa3wVM93+aZlZU8MhbXH7E=
-k8s.io/client-go v0.27.0-alpha.1 h1:GLfzvPrPjCk/WlcaX1JzpSYEtN1p+j6QgMNoQsj5JIQ=
-k8s.io/client-go v0.27.0-alpha.1/go.mod h1:XGeHlRblXNGUEWfmNxWH66XcNFk1RvwgarhDbUNtpeE=
-k8s.io/cloud-provider v0.27.0-alpha.1 h1:2p28CtLVr41pz6/Il3BnNXuKCeCyeq+itTQBYXBx6Zg=
-k8s.io/cloud-provider v0.27.0-alpha.1/go.mod h1:7u1gjRbf9VDV73sDNqMUi56ipifmyc9USPzZXuOHL9s=
-k8s.io/component-base v0.27.0-alpha.1 h1:+5F9ilxocwZsj+Z8mS6nm64ZYQy6dRB+aYk6D1ApRjU=
-k8s.io/component-base v0.27.0-alpha.1/go.mod h1:5R5aAxOBbwPiDQrgRfV3nQHa+lhOXCUsiXOVWubXKio=
-k8s.io/component-helpers v0.27.0-alpha.1 h1:ben8y2mRXBx6tls8V8PF/MK8GkS6QdFnDjOGZ/KsIPY=
-k8s.io/component-helpers v0.27.0-alpha.1/go.mod h1:AULEoyFqwh3THhMUjrDtWRGqaxx0OpH6s5IfdmU+E6g=
-k8s.io/cri-api v0.27.0-alpha.1 h1:4FujdgMZWoa0KkxipDAJnj564Z+tr1BATHxHZ6MQ6Hk=
-k8s.io/cri-api v0.27.0-alpha.1/go.mod h1:4wcbqN7evDJ7BYMZl9iOVTdh/wtvOc2bqEpBwCk5quY=
-k8s.io/csi-translation-lib v0.27.0-alpha.1 h1:/joQUMhvB8JlZSOKfDcmR5JEaI8HAKXq+z5FHSXnLRg=
-k8s.io/csi-translation-lib v0.27.0-alpha.1/go.mod h1:JcaGK9EA3iU4ZarfNnPVSSlL+ev4S2xveIhrfY+NjxI=
-k8s.io/dynamic-resource-allocation v0.27.0-alpha.1 h1:Ah1EnqYEml3MsOjjASgLY591YuNT9+mxkGHqIPbP8YI=
-k8s.io/dynamic-resource-allocation v0.27.0-alpha.1/go.mod h1:TcVld/WY7j1FroVM6BkBDuhcSRHbXAZe2dJBTrmEK1c=
+k8s.io/api v0.27.0 h1:2owttiA8Oa+J3idFeq8TSnNpm4y6AOGPI3PDbIpp2cE=
+k8s.io/api v0.27.0/go.mod h1:Wl+QRvQlh+T8SK5f4F6YBhhyH6hrFO08nl74xZb1MUE=
+k8s.io/apimachinery v0.27.0 h1:vEyy/PVMbPMCPutrssCVHCf0JNZ0Px+YqPi82K2ALlk=
+k8s.io/apimachinery v0.27.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
+k8s.io/apiserver v0.27.0 h1:sXt/2yVMebZef6GqJHs4IYHSdSYwwrJCafBV/KSCwDw=
+k8s.io/apiserver v0.27.0/go.mod h1:8heEJ5f6EqiKwXC3Ez3ikgOvGtRSEQG/SQZkhO9UzIg=
+k8s.io/client-go v0.27.0 h1:DyZS1fJkv73tEy7rWv4VF6NwGeJ7SKvNaLRXZBYLA+4=
+k8s.io/client-go v0.27.0/go.mod h1:XVEmpNnM+4JYO3EENoFV/ZDv3KxKVJUnzGo70avk+C4=
+k8s.io/cloud-provider v0.27.0 h1:UWEvGvfd9VDRSrtmek7dDeHfUUtycHyvIO6TGI9bFJE=
+k8s.io/cloud-provider v0.27.0/go.mod h1:hUbqXpAWGaOTUhwL5k2QO9i2l9mEMhdMV9ChbvB3Gmw=
+k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA=
+k8s.io/cloud-provider-aws v1.27.0/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA=
+k8s.io/component-base v0.27.0 h1:g3/FkscH8Uqg9SiDCEfhfhTVwKiVo4T2+iBwUqiFkMg=
+k8s.io/component-base v0.27.0/go.mod h1:PXyBQd/vYYjqqGB83rnsHffTTG6zlmxZAd0ZSOu6evk=
+k8s.io/component-helpers v0.27.0 h1:rymQGJc4s30hHeb5VGuPdht8gKIPecj+Bw2FOJSavE4=
+k8s.io/component-helpers v0.27.0/go.mod h1:vMjVwym/Y0BVyNvg8a4Et2vyPJAh/JhBM0OTRAt0Ceg=
+k8s.io/controller-manager v0.27.0 h1:xW0V4tXJfxRmc5OEwZn0GHU0auKySRJmlVMS/tqrWPw=
+k8s.io/controller-manager v0.27.0/go.mod h1:E9SEe60LMWkBTe7IUm1pVTrikc5tjzEl6RUNbBUdm3c=
+k8s.io/cri-api v0.27.0 h1:kaOF4faxjECAGu0+ApMt2zHfW7Z7mLu0GWFtaU4fN5E=
+k8s.io/cri-api v0.27.0/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
+k8s.io/csi-translation-lib v0.27.0 h1:1tv+MhNxJRFCmexqoSYXks4N/4zikOrgyLpYF63lXzo=
+k8s.io/csi-translation-lib v0.27.0/go.mod h1:ZOPmKWI/2Ad2GRTIBXzOyX52NmtTcDTsh4GWqoHvHVA=
+k8s.io/dynamic-resource-allocation v0.27.0 h1:MEKgqXhsxN7jJgcyVzsSlGIJ0ryLnj5DalNz+rP50zY=
+k8s.io/dynamic-resource-allocation v0.27.0/go.mod h1:a/1QBgAWnYHP/4/JTuITLrdOWfiuhkRX3mw3ijFYuTo=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kms v0.27.0-alpha.1 h1:BHLcJkYr4X05hWlolKoTT4MvBJOFXciVP3xcMQ/TlXE=
-k8s.io/kms v0.27.0-alpha.1/go.mod h1:YD/RpQRK1iq1ur5ndhskkdKPgHqY0wfCDfEir3lxiFg=
-k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596 h1:8cNCQs+WqqnSpZ7y0LMQPKD+RZUHU17VqLPMW3qxnxc=
-k8s.io/kube-openapi v0.0.0-20230109183929-3758b55a6596/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0=
-k8s.io/kube-proxy v0.27.0-alpha.1 h1:qQ/YbTp7k/KxuAv95K/Yq1Ubp0+tW3iBRx8j63IxMgE=
-k8s.io/kube-proxy v0.27.0-alpha.1/go.mod h1:CiQM8Qy1YEbiDgBREpV9GDmenP9X6ehe73KrvVYvrBQ=
-k8s.io/kube-scheduler v0.27.0-alpha.1 h1:8raWcfrlDrKx983wqsdymE2ISlMNbBMPWXZXHA3TJII=
-k8s.io/kube-scheduler v0.27.0-alpha.1/go.mod h1:MIZFneWs5tkB2Sn64Aw6NbzmpuwzseCb9BPa7GlnVLc=
-k8s.io/kubectl v0.27.0-alpha.1 h1:ukrFyro2+pT+ElywCU8BkALLiTCl3mfQk8fnNqKKlzg=
-k8s.io/kubectl v0.27.0-alpha.1/go.mod h1:jCBlSazqrwr8cAlPmXFDg6MjprKyb01L/gFb6frelvE=
-k8s.io/kubelet v0.27.0-alpha.1 h1:areTiWI1hMKxRAXfuJvCQndu9sSqx0H/ooCj0nUzRhg=
-k8s.io/kubelet v0.27.0-alpha.1/go.mod h1:ZeUHblI2ZNrQIZFPu8FK9NSQFG/i4c9uSi7qbpgbkqo=
-k8s.io/kubernetes v1.27.0-alpha.1 h1:O9/j89TAORHqEyfhAEz3RFuVxMe6i3dcohqi40xg5HE=
-k8s.io/kubernetes v1.27.0-alpha.1/go.mod h1:YakiZaor8p/y+3HitKFdE0FyRmjBfQbAjdpYWBgssZ4=
-k8s.io/legacy-cloud-providers v0.27.0-alpha.1 h1:AtjVFtDFlwj/Cn23YQsoAecQlnnTb2lfBVJmsBAQjy0=
-k8s.io/legacy-cloud-providers v0.27.0-alpha.1/go.mod h1:5jGVAF1MlPe/iRLYqtnkLIxK2MqqqUBQTcFlcZYEAY0=
-k8s.io/mount-utils v0.27.0-alpha.1 h1:YDIpnmJxeossNar1BFyb/bdmEtpeC2mp/x3nz66oRc8=
-k8s.io/mount-utils v0.27.0-alpha.1/go.mod h1:p03H0tBxdCqsrjayG3G6fq9whrWOHeFgoUkOI1F7VcQ=
+k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
+k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kms v0.27.0 h1:adCotKQybOjxwbxW7ogXyv8uQGan/3Y126S2aNW4YFY=
+k8s.io/kms v0.27.0/go.mod h1:vI2R4Nhw+PZ+DYtVPVYKsIqip2IYjZWK9bESR64WdIw=
+k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c h1:EFfsozyzZ/pggw5qNx7ftTVZdp7WZl+3ih89GEjYEK8=
+k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/kube-proxy v0.27.0 h1:agah2aBnBjFsoZsBxPyYU5+0BOpC7P9yxVPr/U0gyCE=
+k8s.io/kube-proxy v0.27.0/go.mod h1:4wF+C295vB266aepEH+6pCJjMuOd6fv0YgdFgJjH3gY=
+k8s.io/kube-scheduler v0.27.0 h1:ea3alhkFy82Kc85X5NKaQz0dftA+c1c5o/oHOkMrpBA=
+k8s.io/kube-scheduler v0.27.0/go.mod h1:J6qRR7a4UIdWb3fhCSfRYae79xqg3KVDqx26MHzNkqY=
+k8s.io/kubectl v0.27.0 h1:ZcWS6ufixDXwovWtzF149gd5GzxdpsIl4YqfioSkq5w=
+k8s.io/kubectl v0.27.0/go.mod h1:tyFzo+6WfbUEccm8rFIliQ79FAmm9uTFN+1oC5Ytamo=
+k8s.io/kubelet v0.27.0 h1:zn70SDJKNmRSFG2qeU2UITzZWdEbLVWIf/u1kd1raUQ=
+k8s.io/kubelet v0.27.0/go.mod h1:Z6ipUvM0AFzUWxvSmot8OodwcMN15lgkFM3bcBexBsI=
+k8s.io/kubernetes v1.27.0 h1:VCI2Qoksx2cv6mHu9g9KVH30ZHNtWSB/+9BtKLSqduM=
+k8s.io/kubernetes v1.27.0/go.mod h1:TTwPjSCKQ+a/NTiFKRGjvOnEaQL8wIG40nsYH8Er4bA=
+k8s.io/legacy-cloud-providers v0.27.0 h1:E8iZLBHTbSoxOXXDpo7a+DbAmzN6XqJSuFkY62kqLLU=
+k8s.io/legacy-cloud-providers v0.27.0/go.mod h1:bIGXGgMpIWlwZ0odAs1dkSSN7v0L6byUf8e7flwe54I=
+k8s.io/mount-utils v0.27.0 h1:hLyzqhLYjIBI1W+6VklbmE6rUOjbYDFdjhhc5q17Vxw=
+k8s.io/mount-utils v0.27.0/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
-k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/cloud-provider-azure v1.26.2 h1://Yr95O53fcY/sakPvXdekCW4o2QKhfs1kNtipR7LpE=
sigs.k8s.io/cloud-provider-azure v1.26.2/go.mod h1:9m8BqB9ubr94uWWgbIY8TyUmHhsE2UEKdAZZG8O/ymc=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/LICENSE b/cluster-autoscaler/vendor/cloud.google.com/go/compute/LICENSE
similarity index 100%
rename from cluster-autoscaler/vendor/cloud.google.com/go/LICENSE
rename to cluster-autoscaler/vendor/cloud.google.com/go/compute/LICENSE
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/internal/version.go b/cluster-autoscaler/vendor/cloud.google.com/go/compute/internal/version.go
new file mode 100644
index 000000000000..ac02a3ce126f
--- /dev/null
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/internal/version.go
@@ -0,0 +1,18 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "1.19.0"
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
new file mode 100644
index 000000000000..06b957349afd
--- /dev/null
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -0,0 +1,19 @@
+# Changes
+
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
+
+## [0.1.0] (2022-10-26)
+
+Initial release of metadata as its own module.
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/LICENSE b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/README.md b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/README.md
new file mode 100644
index 000000000000..f940fb2c85b8
--- /dev/null
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/README.md
@@ -0,0 +1,27 @@
+# Compute API
+
+[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata)
+
+This is a utility library for communicating with the Google Cloud metadata service
+on Google Cloud.
+
+## Install
+
+```bash
+go get cloud.google.com/go/compute/metadata
+```
+
+## Go Version Support
+
+See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
+section in the root directory's README.
+
+## Contributing
+
+Contributions are welcome. Please see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms. See
+[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
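For orientation, a minimal usage sketch of the metadata package vendored above. This is not part of the patch; it assumes code running on a GCE instance and uses the package's documented OnGCE, ProjectID, and InstanceName helpers.

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE reports whether this process appears to be running on GCE;
	// off GCE the metadata server is unreachable and lookups will fail.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; metadata service unavailable")
	}

	// ProjectID and InstanceName query the metadata server via the
	// package's default client (2s dial timeout, and a 5s overall
	// request timeout after the change in metadata.go below).
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("project ID lookup failed: %v", err)
	}
	instance, err := metadata.InstanceName()
	if err != nil {
		log.Fatalf("instance name lookup failed: %v", err)
	}
	fmt.Printf("project=%s instance=%s\n", project, instance)
}
```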
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/metadata.go b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/metadata.go
index b6e1f7b614dd..c17faa142a44 100644
--- a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -16,7 +16,7 @@
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
+// as documented at https://cloud.google.com/compute/docs/metadata/overview.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
@@ -61,14 +61,20 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: &http.Client{
- Transport: &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 2 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- },
-}}
+var defaultClient = &Client{hc: newDefaultHTTPClient()}
+
+func newDefaultHTTPClient() *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ IdleConnTimeout: 60 * time.Second,
+ },
+ Timeout: 5 * time.Second,
+ }
+}
// NotDefinedError is returned when requested metadata is not defined.
//
@@ -130,7 +136,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
- res, err := defaultClient.hc.Do(req.WithContext(ctx))
+ res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
@@ -140,7 +146,8 @@ func testOnGCE() bool {
}()
go func() {
- addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
+ resolver := &net.Resolver{}
+ addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
if err != nil || len(addrs) == 0 {
resc <- false
return
@@ -323,7 +330,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
break
}
if reqErr != nil {
- return "", "", nil
+ return "", "", reqErr
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
diff --git a/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
new file mode 100644
index 000000000000..4cef48500817
--- /dev/null
+++ b/cluster-autoscaler/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
@@ -0,0 +1,23 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the {{.RootMod}} import, won't actually become part of
+// the resultant binary.
+//go:build modhack
+// +build modhack
+
+package metadata
+
+// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "cloud.google.com/go/compute/internal"
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md
deleted file mode 100644
index 52911e4cc5e4..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/CHANGELOG.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# Change History
-
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json
deleted file mode 100644
index ee881857d1cc..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "commit": "bd64220293a403f70ae8beebd56fb86951007acf",
- "readme": "/_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
- "tag": "package-2021-07-01",
- "use": "@microsoft.azure/autorest.go@2.1.187",
- "repository_url": "https://github.com/Azure/azure-rest-api-specs.git",
- "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-07-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md",
- "additional_properties": {
- "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix"
- }
-}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go
deleted file mode 100644
index 7953c6555084..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/availabilitysets.go
+++ /dev/null
@@ -1,652 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// AvailabilitySetsClient is the compute Client
-type AvailabilitySetsClient struct {
- BaseClient
-}
-
-// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient client.
-func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient {
- return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewAvailabilitySetsClientWithBaseURI creates an instance of the AvailabilitySetsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient {
- return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update an availability set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// availabilitySetName - the name of the availability set.
-// parameters - parameters supplied to the Create Availability Set operation.
-func (client AvailabilitySetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (result AvailabilitySet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "availabilitySetName": autorest.Encode("path", availabilitySetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete an availability set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// availabilitySetName - the name of the availability set.
-func (client AvailabilitySetsClient) Delete(ctx context.Context, resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.Delete")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, availabilitySetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.DeleteSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request")
- return
- }
-
- result, err = client.DeleteResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "availabilitySetName": autorest.Encode("path", availabilitySetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about an availability set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// availabilitySetName - the name of the availability set.
-func (client AvailabilitySetsClient) Get(ctx context.Context, resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, availabilitySetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "availabilitySetName": autorest.Encode("path", availabilitySetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all availability sets in a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.List")
- defer func() {
- sc := -1
- if result.aslr.Response.Response != nil {
- sc = result.aslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.aslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.aslr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.aslr.hasNextLink() && result.aslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client AvailabilitySetsClient) listNextResults(ctx context.Context, lastResults AvailabilitySetListResult) (result AvailabilitySetListResult, err error) {
- req, err := lastResults.availabilitySetListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client AvailabilitySetsClient) ListComplete(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName)
- return
-}
-
-// ListAvailableSizes lists all available virtual machine sizes that can be used to create a new virtual machine in an
-// existing availability set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// availabilitySetName - the name of the availability set.
-func (client AvailabilitySetsClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListAvailableSizes")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, availabilitySetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAvailableSizesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListAvailableSizesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListAvailableSizesPreparer prepares the ListAvailableSizes request.
-func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "availabilitySetName": autorest.Encode("path", availabilitySetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListBySubscription lists all availability sets in a subscription.
-// Parameters:
-// expand - the expand expression to apply to the operation. Allowed values are 'instanceView'.
-func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, expand string) (result AvailabilitySetListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.aslr.Response.Response != nil {
- sc = result.aslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listBySubscriptionNextResults
- req, err := client.ListBySubscriptionPreparer(ctx, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.aslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", resp, "Failure sending request")
- return
- }
-
- result.aslr, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", resp, "Failure responding to request")
- return
- }
- if result.aslr.hasNextLink() && result.aslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListBySubscriptionPreparer prepares the ListBySubscription request.
-func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) ListBySubscriptionResponder(resp *http.Response) (result AvailabilitySetListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client AvailabilitySetsClient) listBySubscriptionNextResults(ctx context.Context, lastResults AvailabilitySetListResult) (result AvailabilitySetListResult, err error) {
- req, err := lastResults.availabilitySetListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
-func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context, expand string) (result AvailabilitySetListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListBySubscription(ctx, expand)
- return
-}
-
-// Update update an availability set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// availabilitySetName - the name of the availability set.
-// parameters - parameters supplied to the Update Availability Set operation.
-func (client AvailabilitySetsClient) Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate) (result AvailabilitySet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "availabilitySetName": autorest.Encode("path", availabilitySetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client AvailabilitySetsClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client AvailabilitySetsClient) UpdateResponder(resp *http.Response) (result AvailabilitySet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go
deleted file mode 100644
index a525a29711a9..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservationgroups.go
+++ /dev/null
@@ -1,596 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CapacityReservationGroupsClient is the compute Client
-type CapacityReservationGroupsClient struct {
- BaseClient
-}
-
-// NewCapacityReservationGroupsClient creates an instance of the CapacityReservationGroupsClient client.
-func NewCapacityReservationGroupsClient(subscriptionID string) CapacityReservationGroupsClient {
- return NewCapacityReservationGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCapacityReservationGroupsClientWithBaseURI creates an instance of the CapacityReservationGroupsClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewCapacityReservationGroupsClientWithBaseURI(baseURI string, subscriptionID string) CapacityReservationGroupsClient {
- return CapacityReservationGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update a capacity reservation group. When updating a capacity reservation
-// group, only tags may be modified. Please refer to https://aka.ms/CapacityReservation for more details.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// parameters - parameters supplied to the Create capacity reservation Group.
-func (client CapacityReservationGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup) (result CapacityReservationGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client CapacityReservationGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result CapacityReservationGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete a capacity reservation group. This operation is allowed only if all the associated
-// resources are disassociated from the reservation group and all capacity reservations under the reservation group
-// have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-func (client CapacityReservationGroupsClient) Delete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Delete")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, capacityReservationGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.DeleteSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", resp, "Failure sending request")
- return
- }
-
- result, err = client.DeleteResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Delete", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client CapacityReservationGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation that retrieves information about a capacity reservation group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance
-// views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime
-// properties of a capacity reservation that is managed by the platform and can change outside of control plane
-// operations.
-func (client CapacityReservationGroupsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, expand CapacityReservationGroupInstanceViewTypes) (result CapacityReservationGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, capacityReservationGroupName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CapacityReservationGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, expand CapacityReservationGroupInstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) GetResponder(resp *http.Response) (result CapacityReservationGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByResourceGroup lists all of the capacity reservation groups in the specified resource group. Use the nextLink
-// property in the response to get the next page of capacity reservation groups.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// expand - the expand expression to apply on the operation. Based on the expand param(s) specified we return
-// Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation
-// group in the response.
-func (client CapacityReservationGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.crglr.Response.Response != nil {
- sc = result.crglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.crglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.crglr, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.crglr.hasNextLink() && result.crglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client CapacityReservationGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result CapacityReservationGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client CapacityReservationGroupsClient) listByResourceGroupNextResults(ctx context.Context, lastResults CapacityReservationGroupListResult) (result CapacityReservationGroupListResult, err error) {
- req, err := lastResults.capacityReservationGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CapacityReservationGroupsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, expand)
- return
-}
-
-// ListBySubscription lists all of the capacity reservation groups in the subscription. Use the nextLink property in
-// the response to get the next page of capacity reservation groups.
-// Parameters:
-// expand - the expand expression to apply on the operation. Based on the expand param(s) specified we return
-// Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation
-// group in the response.
-func (client CapacityReservationGroupsClient) ListBySubscription(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.crglr.Response.Response != nil {
- sc = result.crglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listBySubscriptionNextResults
- req, err := client.ListBySubscriptionPreparer(ctx, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.crglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", resp, "Failure sending request")
- return
- }
-
- result.crglr, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "ListBySubscription", resp, "Failure responding to request")
- return
- }
- if result.crglr.hasNextLink() && result.crglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListBySubscriptionPreparer prepares the ListBySubscription request.
-func (client CapacityReservationGroupsClient) ListBySubscriptionPreparer(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result CapacityReservationGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client CapacityReservationGroupsClient) listBySubscriptionNextResults(ctx context.Context, lastResults CapacityReservationGroupListResult) (result CapacityReservationGroupListResult, err error) {
- req, err := lastResults.capacityReservationGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CapacityReservationGroupsClient) ListBySubscriptionComplete(ctx context.Context, expand ExpandTypesForGetCapacityReservationGroups) (result CapacityReservationGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListBySubscription(ctx, expand)
- return
-}
-
-// Update the operation to update a capacity reservation group. When updating a capacity reservation group, only tags
-// may be modified.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// parameters - parameters supplied to the Update capacity reservation Group operation.
-func (client CapacityReservationGroupsClient) Update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate) (result CapacityReservationGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupsClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationGroupsClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client CapacityReservationGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationGroupsClient) UpdateResponder(resp *http.Response) (result CapacityReservationGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go
deleted file mode 100644
index fc894216ae13..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/capacityreservations.go
+++ /dev/null
@@ -1,493 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CapacityReservationsClient is the compute Client
-type CapacityReservationsClient struct {
- BaseClient
-}
-
-// NewCapacityReservationsClient creates an instance of the CapacityReservationsClient client.
-func NewCapacityReservationsClient(subscriptionID string) CapacityReservationsClient {
- return NewCapacityReservationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCapacityReservationsClientWithBaseURI creates an instance of the CapacityReservationsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewCapacityReservationsClientWithBaseURI(baseURI string, subscriptionID string) CapacityReservationsClient {
- return CapacityReservationsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update a capacity reservation. Please note some properties can be set only
-// during capacity reservation creation. Please refer to https://aka.ms/CapacityReservation for more details.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// capacityReservationName - the name of the capacity reservation.
-// parameters - parameters supplied to the Create capacity reservation.
-func (client CapacityReservationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation) (result CapacityReservationsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.CapacityReservationsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client CapacityReservationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "capacityReservationName": autorest.Encode("path", capacityReservationName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationsClient) CreateOrUpdateSender(req *http.Request) (future CapacityReservationsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationsClient) CreateOrUpdateResponder(resp *http.Response) (result CapacityReservation, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete a capacity reservation. This operation is allowed only when all the associated
-// resources are disassociated from the capacity reservation. Please refer to https://aka.ms/CapacityReservation for
-// more details.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// capacityReservationName - the name of the capacity reservation.
-func (client CapacityReservationsClient) Delete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string) (result CapacityReservationsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client CapacityReservationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "capacityReservationName": autorest.Encode("path", capacityReservationName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationsClient) DeleteSender(req *http.Request) (future CapacityReservationsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation that retrieves information about the capacity reservation.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// capacityReservationName - the name of the capacity reservation.
-// expand - the expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime
-// properties of the capacity reservation that is managed by the platform and can change outside of control
-// plane operations.
-func (client CapacityReservationsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, expand CapacityReservationInstanceViewTypes) (result CapacityReservation, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CapacityReservationsClient) GetPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, expand CapacityReservationInstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "capacityReservationName": autorest.Encode("path", capacityReservationName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationsClient) GetResponder(resp *http.Response) (result CapacityReservation, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByCapacityReservationGroup lists all of the capacity reservations in the specified capacity reservation group.
-// Use the nextLink property in the response to get the next page of capacity reservations.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-func (client CapacityReservationsClient) ListByCapacityReservationGroup(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result CapacityReservationListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.ListByCapacityReservationGroup")
- defer func() {
- sc := -1
- if result.crlr.Response.Response != nil {
- sc = result.crlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByCapacityReservationGroupNextResults
- req, err := client.ListByCapacityReservationGroupPreparer(ctx, resourceGroupName, capacityReservationGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByCapacityReservationGroupSender(req)
- if err != nil {
- result.crlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", resp, "Failure sending request")
- return
- }
-
- result.crlr, err = client.ListByCapacityReservationGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "ListByCapacityReservationGroup", resp, "Failure responding to request")
- return
- }
- if result.crlr.hasNextLink() && result.crlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByCapacityReservationGroupPreparer prepares the ListByCapacityReservationGroup request.
-func (client CapacityReservationsClient) ListByCapacityReservationGroupPreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByCapacityReservationGroupSender sends the ListByCapacityReservationGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationsClient) ListByCapacityReservationGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByCapacityReservationGroupResponder handles the response to the ListByCapacityReservationGroup request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationsClient) ListByCapacityReservationGroupResponder(resp *http.Response) (result CapacityReservationListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByCapacityReservationGroupNextResults retrieves the next set of results, if any.
-func (client CapacityReservationsClient) listByCapacityReservationGroupNextResults(ctx context.Context, lastResults CapacityReservationListResult) (result CapacityReservationListResult, err error) {
- req, err := lastResults.capacityReservationListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByCapacityReservationGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByCapacityReservationGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "listByCapacityReservationGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByCapacityReservationGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CapacityReservationsClient) ListByCapacityReservationGroupComplete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string) (result CapacityReservationListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.ListByCapacityReservationGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByCapacityReservationGroup(ctx, resourceGroupName, capacityReservationGroupName)
- return
-}
-
-// Update the operation to update a capacity reservation.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// capacityReservationGroupName - the name of the capacity reservation group.
-// capacityReservationName - the name of the capacity reservation.
-// parameters - parameters supplied to the Update capacity reservation operation.
-func (client CapacityReservationsClient) Update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate) (result CapacityReservationsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client CapacityReservationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "capacityReservationGroupName": autorest.Encode("path", capacityReservationGroupName),
- "capacityReservationName": autorest.Encode("path", capacityReservationName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client CapacityReservationsClient) UpdateSender(req *http.Request) (future CapacityReservationsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client CapacityReservationsClient) UpdateResponder(resp *http.Response) (result CapacityReservation, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
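
For orientation, a minimal, hypothetical sketch of how the track-1 `CapacityReservationsClient` removed above was typically driven. The subscription, resource group, and reservation names are placeholders, and the `Sku` model plus the future's `WaitForCompletionRef` helper come from the package's generated models.go, which is deleted in the same change; this is not part of the diff itself.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Credentials from AZURE_* environment variables (service principal, MSI, ...).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := compute.NewCapacityReservationsClient("<subscription-id>") // placeholder
	client.Authorizer = authorizer

	ctx := context.Background()
	skuName := "Standard_D2s_v3"
	capacity := int64(2)

	// Sku is mandatory; see the validation constraint in CreateOrUpdate above.
	// CreateOrUpdate is a long-running operation and returns a future.
	// A real request also sets Location (and usually Zones); omitted to keep the sketch short.
	future, err := client.CreateOrUpdate(ctx, "<resource-group>", "<reservation-group>", "<reservation>",
		compute.CapacityReservation{
			Sku: &compute.Sku{Name: &skuName, Capacity: &capacity},
		})
	if err != nil {
		panic(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	fmt.Println("capacity reservation provisioned")
}
```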
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go
deleted file mode 100644
index c7c4543154d5..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/client.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details.
-//
-// Package compute implements the Azure ARM Compute service API version .
-//
-// Compute Client
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "github.com/Azure/go-autorest/autorest"
-)
-
-const (
- // DefaultBaseURI is the default URI used for the service Compute
- DefaultBaseURI = "https://management.azure.com"
-)
-
-// BaseClient is the base client for Compute.
-type BaseClient struct {
- autorest.Client
- BaseURI string
- SubscriptionID string
-}
-
-// New creates an instance of the BaseClient client.
-func New(subscriptionID string) BaseClient {
- return NewWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
-// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
- return BaseClient{
- Client: autorest.NewClientWithUserAgent(UserAgent()),
- BaseURI: baseURI,
- SubscriptionID: subscriptionID,
- }
-}
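
A quick illustration of what `NewWithBaseURI` is for (sovereign clouds, Azure Stack): any of the compute clients above can be pointed at a non-default ARM endpoint. The environment choice and subscription value are placeholders; this sketch is not part of the change.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// go-autorest ships per-cloud metadata: USGovernmentCloud, ChinaCloud, etc.
	env := azure.USGovernmentCloud

	// Same as NewCapacityReservationsClient, but against the sovereign-cloud ARM
	// endpoint instead of DefaultBaseURI ("https://management.azure.com").
	client := compute.NewCapacityReservationsClientWithBaseURI(env.ResourceManagerEndpoint, "<subscription-id>")
	fmt.Println("base URI:", client.BaseURI)
}
```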
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go
deleted file mode 100644
index 7dfce08d9096..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceoperatingsystems.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CloudServiceOperatingSystemsClient is the compute Client
-type CloudServiceOperatingSystemsClient struct {
- BaseClient
-}
-
-// NewCloudServiceOperatingSystemsClient creates an instance of the CloudServiceOperatingSystemsClient client.
-func NewCloudServiceOperatingSystemsClient(subscriptionID string) CloudServiceOperatingSystemsClient {
- return NewCloudServiceOperatingSystemsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCloudServiceOperatingSystemsClientWithBaseURI creates an instance of the CloudServiceOperatingSystemsClient
-// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
-// (sovereign clouds, Azure stack).
-func NewCloudServiceOperatingSystemsClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceOperatingSystemsClient {
- return CloudServiceOperatingSystemsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// GetOSFamily gets properties of a guest operating system family that can be specified in the XML service
-// configuration (.cscfg) for a cloud service.
-// Parameters:
-// location - name of the location that the OS family pertains to.
-// osFamilyName - name of the OS family.
-func (client CloudServiceOperatingSystemsClient) GetOSFamily(ctx context.Context, location string, osFamilyName string) (result OSFamily, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.GetOSFamily")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetOSFamilyPreparer(ctx, location, osFamilyName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetOSFamilySender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetOSFamilyResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSFamily", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetOSFamilyPreparer prepares the GetOSFamily request.
-func (client CloudServiceOperatingSystemsClient) GetOSFamilyPreparer(ctx context.Context, location string, osFamilyName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "osFamilyName": autorest.Encode("path", osFamilyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies/{osFamilyName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetOSFamilySender sends the GetOSFamily request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceOperatingSystemsClient) GetOSFamilySender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetOSFamilyResponder handles the response to the GetOSFamily request. The method always
-// closes the http.Response Body.
-func (client CloudServiceOperatingSystemsClient) GetOSFamilyResponder(resp *http.Response) (result OSFamily, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetOSVersion gets properties of a guest operating system version that can be specified in the XML service
-// configuration (.cscfg) for a cloud service.
-// Parameters:
-// location - name of the location that the OS version pertains to.
-// osVersionName - name of the OS version.
-func (client CloudServiceOperatingSystemsClient) GetOSVersion(ctx context.Context, location string, osVersionName string) (result OSVersion, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.GetOSVersion")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetOSVersionPreparer(ctx, location, osVersionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetOSVersionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetOSVersionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "GetOSVersion", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetOSVersionPreparer prepares the GetOSVersion request.
-func (client CloudServiceOperatingSystemsClient) GetOSVersionPreparer(ctx context.Context, location string, osVersionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "osVersionName": autorest.Encode("path", osVersionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions/{osVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetOSVersionSender sends the GetOSVersion request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceOperatingSystemsClient) GetOSVersionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetOSVersionResponder handles the response to the GetOSVersion request. The method always
-// closes the http.Response Body.
-func (client CloudServiceOperatingSystemsClient) GetOSVersionResponder(resp *http.Response) (result OSVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListOSFamilies gets a list of all guest operating system families available to be specified in the XML service
-// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page of OS
-// Families. Do this till nextLink is null to fetch all the OS Families.
-// Parameters:
-// location - name of the location that the OS families pertain to.
-func (client CloudServiceOperatingSystemsClient) ListOSFamilies(ctx context.Context, location string) (result OSFamilyListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSFamilies")
- defer func() {
- sc := -1
- if result.oflr.Response.Response != nil {
- sc = result.oflr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listOSFamiliesNextResults
- req, err := client.ListOSFamiliesPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListOSFamiliesSender(req)
- if err != nil {
- result.oflr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", resp, "Failure sending request")
- return
- }
-
- result.oflr, err = client.ListOSFamiliesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSFamilies", resp, "Failure responding to request")
- return
- }
- if result.oflr.hasNextLink() && result.oflr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListOSFamiliesPreparer prepares the ListOSFamilies request.
-func (client CloudServiceOperatingSystemsClient) ListOSFamiliesPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListOSFamiliesSender sends the ListOSFamilies request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceOperatingSystemsClient) ListOSFamiliesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListOSFamiliesResponder handles the response to the ListOSFamilies request. The method always
-// closes the http.Response Body.
-func (client CloudServiceOperatingSystemsClient) ListOSFamiliesResponder(resp *http.Response) (result OSFamilyListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listOSFamiliesNextResults retrieves the next set of results, if any.
-func (client CloudServiceOperatingSystemsClient) listOSFamiliesNextResults(ctx context.Context, lastResults OSFamilyListResult) (result OSFamilyListResult, err error) {
- req, err := lastResults.oSFamilyListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListOSFamiliesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListOSFamiliesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSFamiliesNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListOSFamiliesComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServiceOperatingSystemsClient) ListOSFamiliesComplete(ctx context.Context, location string) (result OSFamilyListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSFamilies")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListOSFamilies(ctx, location)
- return
-}
-
-// ListOSVersions gets a list of all guest operating system versions available to be specified in the XML service
-// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page of OS
-// versions. Do this till nextLink is null to fetch all the OS versions.
-// Parameters:
-// location - name of the location that the OS versions pertain to.
-func (client CloudServiceOperatingSystemsClient) ListOSVersions(ctx context.Context, location string) (result OSVersionListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSVersions")
- defer func() {
- sc := -1
- if result.ovlr.Response.Response != nil {
- sc = result.ovlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listOSVersionsNextResults
- req, err := client.ListOSVersionsPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListOSVersionsSender(req)
- if err != nil {
- result.ovlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", resp, "Failure sending request")
- return
- }
-
- result.ovlr, err = client.ListOSVersionsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "ListOSVersions", resp, "Failure responding to request")
- return
- }
- if result.ovlr.hasNextLink() && result.ovlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListOSVersionsPreparer prepares the ListOSVersions request.
-func (client CloudServiceOperatingSystemsClient) ListOSVersionsPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListOSVersionsSender sends the ListOSVersions request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceOperatingSystemsClient) ListOSVersionsSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListOSVersionsResponder handles the response to the ListOSVersions request. The method always
-// closes the http.Response Body.
-func (client CloudServiceOperatingSystemsClient) ListOSVersionsResponder(resp *http.Response) (result OSVersionListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listOSVersionsNextResults retrieves the next set of results, if any.
-func (client CloudServiceOperatingSystemsClient) listOSVersionsNextResults(ctx context.Context, lastResults OSVersionListResult) (result OSVersionListResult, err error) {
- req, err := lastResults.oSVersionListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListOSVersionsSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListOSVersionsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceOperatingSystemsClient", "listOSVersionsNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListOSVersionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServiceOperatingSystemsClient) ListOSVersionsComplete(ctx context.Context, location string) (result OSVersionListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceOperatingSystemsClient.ListOSVersions")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListOSVersions(ctx, location)
- return
-}
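
The List* methods above are page-based; the *Complete variants wrap them in an iterator that follows nextLink automatically. A hypothetical sketch of walking all OS families for a region follows; the iterator helpers (NotDone, Value, NextWithContext) and the OSFamily.Name field come from the generated models.go removed in this change, and the location and subscription are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := compute.NewCloudServiceOperatingSystemsClient("<subscription-id>")
	client.Authorizer = authorizer

	ctx := context.Background()
	it, err := client.ListOSFamiliesComplete(ctx, "<location>")
	if err != nil {
		panic(err)
	}
	// The iterator crosses page boundaries (nextLink) as required.
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
}
```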
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go
deleted file mode 100644
index 4edea0aec5d7..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroleinstances.go
+++ /dev/null
@@ -1,699 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CloudServiceRoleInstancesClient is the compute Client
-type CloudServiceRoleInstancesClient struct {
- BaseClient
-}
-
-// NewCloudServiceRoleInstancesClient creates an instance of the CloudServiceRoleInstancesClient client.
-func NewCloudServiceRoleInstancesClient(subscriptionID string) CloudServiceRoleInstancesClient {
- return NewCloudServiceRoleInstancesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCloudServiceRoleInstancesClientWithBaseURI creates an instance of the CloudServiceRoleInstancesClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewCloudServiceRoleInstancesClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceRoleInstancesClient {
- return CloudServiceRoleInstancesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Delete deletes a role instance from a cloud service.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) Delete(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client CloudServiceRoleInstancesClient) DeletePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) DeleteSender(req *http.Request) (future CloudServiceRoleInstancesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets a role instance from a cloud service.
-// Parameters:
-// roleInstanceName - name of the role instance.
-// expand - the expand expression to apply to the operation. 'UserData' is not supported for cloud services.
-func (client CloudServiceRoleInstancesClient) Get(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstance, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CloudServiceRoleInstancesClient) GetPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) GetResponder(resp *http.Response) (result RoleInstance, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetInstanceView retrieves information about the run-time state of a role instance in a cloud service.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) GetInstanceView(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result RoleInstanceInstanceView, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.GetInstanceView")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetInstanceViewPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetInstanceViewSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetInstanceViewResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetInstanceView", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetInstanceViewPreparer prepares the GetInstanceView request.
-func (client CloudServiceRoleInstancesClient) GetInstanceViewPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetInstanceViewSender sends the GetInstanceView request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) GetInstanceViewResponder(resp *http.Response) (result RoleInstanceInstanceView, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetRemoteDesktopFile gets a remote desktop file for a role instance in a cloud service.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFile(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result ReadCloser, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.GetRemoteDesktopFile")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetRemoteDesktopFilePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetRemoteDesktopFileSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetRemoteDesktopFileResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "GetRemoteDesktopFile", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetRemoteDesktopFilePreparer prepares the GetRemoteDesktopFile request.
-func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFilePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetRemoteDesktopFileSender sends the GetRemoteDesktopFile request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFileSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetRemoteDesktopFileResponder handles the response to the GetRemoteDesktopFile request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) GetRemoteDesktopFileResponder(resp *http.Response) (result ReadCloser, err error) {
- result.Value = &resp.Body
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK))
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets the list of all role instances in a cloud service. Use nextLink property in the response to get the next
-// page of role instances. Do this till nextLink is null to fetch all the role instances.
-// Parameters:
-// expand - the expand expression to apply to the operation. 'UserData' is not supported for cloud services.
-func (client CloudServiceRoleInstancesClient) List(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstanceListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.List")
- defer func() {
- sc := -1
- if result.rilr.Response.Response != nil {
- sc = result.rilr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, cloudServiceName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.rilr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.rilr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.rilr.hasNextLink() && result.rilr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client CloudServiceRoleInstancesClient) ListPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) ListResponder(resp *http.Response) (result RoleInstanceListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client CloudServiceRoleInstancesClient) listNextResults(ctx context.Context, lastResults RoleInstanceListResult) (result RoleInstanceListResult, err error) {
- req, err := lastResults.roleInstanceListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServiceRoleInstancesClient) ListComplete(ctx context.Context, resourceGroupName string, cloudServiceName string, expand InstanceViewTypes) (result RoleInstanceListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName, cloudServiceName, expand)
- return
-}
-
-// Rebuild the Rebuild Role Instance asynchronous operation reinstalls the operating system on instances of web roles
-// or worker roles and initializes the storage resources that are used by them. If you do not want to initialize
-// storage resources, you can use Reimage Role Instance.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) Rebuild(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesRebuildFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Rebuild")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RebuildPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Rebuild", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RebuildSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Rebuild", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RebuildPreparer prepares the Rebuild request.
-func (client CloudServiceRoleInstancesClient) RebuildPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RebuildSender sends the Rebuild request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) RebuildSender(req *http.Request) (future CloudServiceRoleInstancesRebuildFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RebuildResponder handles the response to the Rebuild request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) RebuildResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reimage the Reimage Role Instance asynchronous operation reinstalls the operating system on instances of web roles
-// or worker roles.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) Reimage(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesReimageFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Reimage")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimagePreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Reimage", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Reimage", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimagePreparer prepares the Reimage request.
-func (client CloudServiceRoleInstancesClient) ReimagePreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageSender sends the Reimage request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) ReimageSender(req *http.Request) (future CloudServiceRoleInstancesReimageFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageResponder handles the response to the Reimage request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Restart the Reboot Role Instance asynchronous operation requests a reboot of a role instance in the cloud service.
-// Parameters:
-// roleInstanceName - name of the role instance.
-func (client CloudServiceRoleInstancesClient) Restart(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleInstancesRestartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleInstancesClient.Restart")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RestartPreparer(ctx, roleInstanceName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Restart", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RestartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesClient", "Restart", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RestartPreparer prepares the Restart request.
-func (client CloudServiceRoleInstancesClient) RestartPreparer(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleInstanceName": autorest.Encode("path", roleInstanceName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestartSender sends the Restart request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRoleInstancesClient) RestartSender(req *http.Request) (future CloudServiceRoleInstancesRestartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RestartResponder handles the response to the Restart request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRoleInstancesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
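For reference, a minimal, hedged sketch of how the track-1 CloudServiceRoleInstancesClient deleted above was typically consumed: ListComplete walks the nextLink pages through the generated iterator, and the Restart future is polled with WaitForCompletionRef. The import path, environment-based authorizer, placeholder names, and the printed field are illustrative assumptions, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Illustrative values only.
	subscriptionID, resourceGroup, cloudService := "<sub-id>", "<rg>", "<cs>"

	client := compute.NewCloudServiceRoleInstancesClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment() // assumes AZURE_* credentials in the environment
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer
	ctx := context.Background()

	// ListComplete follows nextLink pages via the generated iterator.
	it, err := client.ListComplete(ctx, resourceGroup, cloudService, compute.InstanceViewTypes("instanceView"))
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}

	// Restart returns a long-running-operation future; WaitForCompletionRef
	// polls it until the service reports completion.
	future, err := client.Restart(ctx, "role_instance_0", resourceGroup, cloudService)
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
}
```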
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go
deleted file mode 100644
index dac28bd988b1..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudserviceroles.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CloudServiceRolesClient is the compute Client
-type CloudServiceRolesClient struct {
- BaseClient
-}
-
-// NewCloudServiceRolesClient creates an instance of the CloudServiceRolesClient client.
-func NewCloudServiceRolesClient(subscriptionID string) CloudServiceRolesClient {
- return NewCloudServiceRolesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCloudServiceRolesClientWithBaseURI creates an instance of the CloudServiceRolesClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewCloudServiceRolesClientWithBaseURI(baseURI string, subscriptionID string) CloudServiceRolesClient {
- return CloudServiceRolesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get gets a role from a cloud service.
-// Parameters:
-// roleName - name of the role.
-func (client CloudServiceRolesClient) Get(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string) (result CloudServiceRole, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, roleName, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CloudServiceRolesClient) GetPreparer(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "roleName": autorest.Encode("path", roleName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRolesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRolesClient) GetResponder(resp *http.Response) (result CloudServiceRole, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all roles in a cloud service. Use nextLink property in the response to get the next page of
-// roles. Do this till nextLink is null to fetch all the roles.
-func (client CloudServiceRolesClient) List(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.List")
- defer func() {
- sc := -1
- if result.csrlr.Response.Response != nil {
- sc = result.csrlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.csrlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.csrlr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.csrlr.hasNextLink() && result.csrlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client CloudServiceRolesClient) ListPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServiceRolesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client CloudServiceRolesClient) ListResponder(resp *http.Response) (result CloudServiceRoleListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client CloudServiceRolesClient) listNextResults(ctx context.Context, lastResults CloudServiceRoleListResult) (result CloudServiceRoleListResult, err error) {
- req, err := lastResults.cloudServiceRoleListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRolesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServiceRolesClient) ListComplete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceRoleListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRolesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName, cloudServiceName)
- return
-}
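Along the same lines, a brief sketch of fetching a single role definition with the CloudServiceRolesClient removed above. It assumes the same imports and an already-authorized client from the previous sketch; the Sku/Capacity field access follows the track-1 CloudServiceRole model and is illustrative.

```go
// printRoleSku fetches one role definition from a cloud service and prints
// its name and SKU capacity. Parameter order mirrors the deleted Get method:
// roleName, resourceGroupName, cloudServiceName.
func printRoleSku(ctx context.Context, client compute.CloudServiceRolesClient,
	resourceGroup, cloudService, roleName string) error {
	role, err := client.Get(ctx, roleName, resourceGroup, cloudService)
	if err != nil {
		return err
	}
	if role.Name != nil {
		fmt.Println("role:", *role.Name)
	}
	if role.Sku != nil && role.Sku.Capacity != nil {
		fmt.Println("capacity:", *role.Sku.Capacity)
	}
	return nil
}
```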
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go
deleted file mode 100644
index 7086dd644a08..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservices.go
+++ /dev/null
@@ -1,1198 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CloudServicesClient is the compute Client
-type CloudServicesClient struct {
- BaseClient
-}
-
-// NewCloudServicesClient creates an instance of the CloudServicesClient client.
-func NewCloudServicesClient(subscriptionID string) CloudServicesClient {
- return NewCloudServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCloudServicesClientWithBaseURI creates an instance of the CloudServicesClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewCloudServicesClientWithBaseURI(baseURI string, subscriptionID string) CloudServicesClient {
- return CloudServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a cloud service. Please note some properties can be set only during cloud service
-// creation.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - the cloud service object.
-func (client CloudServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudService) (result CloudServicesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.CloudServicesClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client CloudServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudService) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- parameters.ID = nil
- parameters.Name = nil
- parameters.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) CreateOrUpdateSender(req *http.Request) (future CloudServicesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) CreateOrUpdateResponder(resp *http.Response) (result CloudService, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesClient) Delete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client CloudServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) DeleteSender(req *http.Request) (future CloudServicesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// DeleteInstances deletes role instances in a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - list of cloud service role instance names.
-func (client CloudServicesClient) DeleteInstances(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesDeleteInstancesFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.DeleteInstances")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.CloudServicesClient", "DeleteInstances", err.Error())
- }
-
- req, err := client.DeleteInstancesPreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "DeleteInstances", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteInstancesSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "DeleteInstances", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeleteInstancesPreparer prepares the DeleteInstances request.
-func (client CloudServicesClient) DeleteInstancesPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteInstancesSender sends the DeleteInstances request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) DeleteInstancesSender(req *http.Request) (future CloudServicesDeleteInstancesFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get display information about a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesClient) Get(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudService, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CloudServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) GetResponder(resp *http.Response) (result CloudService, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetInstanceView gets the status of a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesClient) GetInstanceView(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServiceInstanceView, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.GetInstanceView")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetInstanceViewSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetInstanceViewResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "GetInstanceView", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetInstanceViewPreparer prepares the GetInstanceView request.
-func (client CloudServicesClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/instanceView", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetInstanceViewSender sends the GetInstanceView request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) GetInstanceViewResponder(resp *http.Response) (result CloudServiceInstanceView, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all cloud services under a resource group. Use nextLink property in the response to get the next
-// page of Cloud Services. Do this till nextLink is null to fetch all the Cloud Services.
-// Parameters:
-// resourceGroupName - name of the resource group.
-func (client CloudServicesClient) List(ctx context.Context, resourceGroupName string) (result CloudServiceListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.List")
- defer func() {
- sc := -1
- if result.cslr.Response.Response != nil {
- sc = result.cslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.cslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.cslr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.cslr.hasNextLink() && result.cslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client CloudServicesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) ListResponder(resp *http.Response) (result CloudServiceListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client CloudServicesClient) listNextResults(ctx context.Context, lastResults CloudServiceListResult) (result CloudServiceListResult, err error) {
- req, err := lastResults.cloudServiceListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServicesClient) ListComplete(ctx context.Context, resourceGroupName string) (result CloudServiceListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName)
- return
-}
-
-// ListAll gets a list of all cloud services in the subscription, regardless of the associated resource group. Use
-// nextLink property in the response to get the next page of Cloud Services. Do this till nextLink is null to fetch all
-// the Cloud Services.
-func (client CloudServicesClient) ListAll(ctx context.Context) (result CloudServiceListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.ListAll")
- defer func() {
- sc := -1
- if result.cslr.Response.Response != nil {
- sc = result.cslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listAllNextResults
- req, err := client.ListAllPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.cslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", resp, "Failure sending request")
- return
- }
-
- result.cslr, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "ListAll", resp, "Failure responding to request")
- return
- }
- if result.cslr.hasNextLink() && result.cslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListAllPreparer prepares the ListAll request.
-func (client CloudServicesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAllSender sends the ListAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) ListAllSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAllResponder handles the response to the ListAll request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) ListAllResponder(resp *http.Response) (result CloudServiceListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listAllNextResults retrieves the next set of results, if any.
-func (client CloudServicesClient) listAllNextResults(ctx context.Context, lastResults CloudServiceListResult) (result CloudServiceListResult, err error) {
- req, err := lastResults.cloudServiceListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "listAllNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServicesClient) ListAllComplete(ctx context.Context) (result CloudServiceListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.ListAll")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListAll(ctx)
- return
-}
-
-// PowerOff power off the cloud service. Note that resources are still attached and you are getting charged for the
-// resources.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesClient) PowerOff(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesPowerOffFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.PowerOff")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PowerOffPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "PowerOff", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PowerOffSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "PowerOff", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PowerOffPreparer prepares the PowerOff request.
-func (client CloudServicesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PowerOffSender sends the PowerOff request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) PowerOffSender(req *http.Request) (future CloudServicesPowerOffFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PowerOffResponder handles the response to the PowerOff request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Rebuild rebuild Role Instances reinstalls the operating system on instances of web roles or worker roles and
-// initializes the storage resources that are used by them. If you do not want to initialize storage resources, you can
-// use Reimage Role Instances.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - list of cloud service role instance names.
-func (client CloudServicesClient) Rebuild(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesRebuildFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Rebuild")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.CloudServicesClient", "Rebuild", err.Error())
- }
-
- req, err := client.RebuildPreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Rebuild", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RebuildSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Rebuild", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RebuildPreparer prepares the Rebuild request.
-func (client CloudServicesClient) RebuildPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RebuildSender sends the Rebuild request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) RebuildSender(req *http.Request) (future CloudServicesRebuildFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RebuildResponder handles the response to the Rebuild request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) RebuildResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reimage reimage asynchronous operation reinstalls the operating system on instances of web roles or worker roles.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - list of cloud service role instance names.
-func (client CloudServicesClient) Reimage(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesReimageFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Reimage")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.CloudServicesClient", "Reimage", err.Error())
- }
-
- req, err := client.ReimagePreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Reimage", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Reimage", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimagePreparer prepares the Reimage request.
-func (client CloudServicesClient) ReimagePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageSender sends the Reimage request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) ReimageSender(req *http.Request) (future CloudServicesReimageFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageResponder handles the response to the Reimage request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Restart restarts one or more role instances in a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - list of cloud service role instance names.
-func (client CloudServicesClient) Restart(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (result CloudServicesRestartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Restart")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RoleInstances", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.CloudServicesClient", "Restart", err.Error())
- }
-
- req, err := client.RestartPreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Restart", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RestartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Restart", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RestartPreparer prepares the Restart request.
-func (client CloudServicesClient) RestartPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *RoleInstances) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestartSender sends the Restart request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) RestartSender(req *http.Request) (future CloudServicesRestartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RestartResponder handles the response to the Restart request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Start starts the cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesClient) Start(ctx context.Context, resourceGroupName string, cloudServiceName string) (result CloudServicesStartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Start")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Start", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Start", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartPreparer prepares the Start request.
-func (client CloudServicesClient) StartPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartSender sends the Start request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) StartSender(req *http.Request) (future CloudServicesStartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartResponder handles the response to the Start request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update update a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// parameters - the cloud service object.
-func (client CloudServicesClient) Update(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudServiceUpdate) (result CloudServicesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, cloudServiceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client CloudServicesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters *CloudServiceUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesClient) UpdateSender(req *http.Request) (future CloudServicesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client CloudServicesClient) UpdateResponder(resp *http.Response) (result CloudService, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go
deleted file mode 100644
index eabe4deefce2..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/cloudservicesupdatedomain.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CloudServicesUpdateDomainClient is the compute Client
-type CloudServicesUpdateDomainClient struct {
- BaseClient
-}
-
-// NewCloudServicesUpdateDomainClient creates an instance of the CloudServicesUpdateDomainClient client.
-func NewCloudServicesUpdateDomainClient(subscriptionID string) CloudServicesUpdateDomainClient {
- return NewCloudServicesUpdateDomainClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCloudServicesUpdateDomainClientWithBaseURI creates an instance of the CloudServicesUpdateDomainClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewCloudServicesUpdateDomainClientWithBaseURI(baseURI string, subscriptionID string) CloudServicesUpdateDomainClient {
- return CloudServicesUpdateDomainClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// GetUpdateDomain gets the specified update domain of a cloud service. Use nextLink property in the response to get
-// the next page of update domains. Do this till nextLink is null to fetch all the update domains.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// updateDomain - specifies an integer value that identifies the update domain. Update domains are identified
-// with a zero-based index: the first update domain has an ID of 0, the second has an ID of 1, and so on.
-func (client CloudServicesUpdateDomainClient) GetUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32) (result UpdateDomain, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.GetUpdateDomain")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetUpdateDomainPreparer(ctx, resourceGroupName, cloudServiceName, updateDomain)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetUpdateDomainSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetUpdateDomainResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "GetUpdateDomain", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetUpdateDomainPreparer prepares the GetUpdateDomain request.
-func (client CloudServicesUpdateDomainClient) GetUpdateDomainPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "updateDomain": autorest.Encode("path", updateDomain),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetUpdateDomainSender sends the GetUpdateDomain request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesUpdateDomainClient) GetUpdateDomainSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetUpdateDomainResponder handles the response to the GetUpdateDomain request. The method always
-// closes the http.Response Body.
-func (client CloudServicesUpdateDomainClient) GetUpdateDomainResponder(resp *http.Response) (result UpdateDomain, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListUpdateDomains gets a list of all update domains in a cloud service.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-func (client CloudServicesUpdateDomainClient) ListUpdateDomains(ctx context.Context, resourceGroupName string, cloudServiceName string) (result UpdateDomainListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.ListUpdateDomains")
- defer func() {
- sc := -1
- if result.udlr.Response.Response != nil {
- sc = result.udlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listUpdateDomainsNextResults
- req, err := client.ListUpdateDomainsPreparer(ctx, resourceGroupName, cloudServiceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListUpdateDomainsSender(req)
- if err != nil {
- result.udlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", resp, "Failure sending request")
- return
- }
-
- result.udlr, err = client.ListUpdateDomainsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "ListUpdateDomains", resp, "Failure responding to request")
- return
- }
- if result.udlr.hasNextLink() && result.udlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListUpdateDomainsPreparer prepares the ListUpdateDomains request.
-func (client CloudServicesUpdateDomainClient) ListUpdateDomainsPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListUpdateDomainsSender sends the ListUpdateDomains request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesUpdateDomainClient) ListUpdateDomainsSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListUpdateDomainsResponder handles the response to the ListUpdateDomains request. The method always
-// closes the http.Response Body.
-func (client CloudServicesUpdateDomainClient) ListUpdateDomainsResponder(resp *http.Response) (result UpdateDomainListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listUpdateDomainsNextResults retrieves the next set of results, if any.
-func (client CloudServicesUpdateDomainClient) listUpdateDomainsNextResults(ctx context.Context, lastResults UpdateDomainListResult) (result UpdateDomainListResult, err error) {
- req, err := lastResults.updateDomainListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListUpdateDomainsSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListUpdateDomainsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "listUpdateDomainsNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListUpdateDomainsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CloudServicesUpdateDomainClient) ListUpdateDomainsComplete(ctx context.Context, resourceGroupName string, cloudServiceName string) (result UpdateDomainListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.ListUpdateDomains")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListUpdateDomains(ctx, resourceGroupName, cloudServiceName)
- return
-}
-
-// WalkUpdateDomain updates the role instances in the specified update domain.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// cloudServiceName - name of the cloud service.
-// updateDomain - specifies an integer value that identifies the update domain. Update domains are identified
-// with a zero-based index: the first update domain has an ID of 0, the second has an ID of 1, and so on.
-// parameters - the update domain object.
-func (client CloudServicesUpdateDomainClient) WalkUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, parameters *UpdateDomain) (result CloudServicesUpdateDomainWalkUpdateDomainFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServicesUpdateDomainClient.WalkUpdateDomain")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.WalkUpdateDomainPreparer(ctx, resourceGroupName, cloudServiceName, updateDomain, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "WalkUpdateDomain", nil, "Failure preparing request")
- return
- }
-
- result, err = client.WalkUpdateDomainSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainClient", "WalkUpdateDomain", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// WalkUpdateDomainPreparer prepares the WalkUpdateDomain request.
-func (client CloudServicesUpdateDomainClient) WalkUpdateDomainPreparer(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, parameters *UpdateDomain) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "cloudServiceName": autorest.Encode("path", cloudServiceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "updateDomain": autorest.Encode("path", updateDomain),
- }
-
- const APIVersion = "2021-03-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- parameters.ID = nil
- parameters.Name = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// WalkUpdateDomainSender sends the WalkUpdateDomain request. The method will close the
-// http.Response Body if it receives an error.
-func (client CloudServicesUpdateDomainClient) WalkUpdateDomainSender(req *http.Request) (future CloudServicesUpdateDomainWalkUpdateDomainFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// WalkUpdateDomainResponder handles the response to the WalkUpdateDomain request. The method always
-// closes the http.Response Body.
-func (client CloudServicesUpdateDomainClient) WalkUpdateDomainResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go
deleted file mode 100644
index 99596387171d..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleries.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CommunityGalleriesClient is the compute Client
-type CommunityGalleriesClient struct {
- BaseClient
-}
-
-// NewCommunityGalleriesClient creates an instance of the CommunityGalleriesClient client.
-func NewCommunityGalleriesClient(subscriptionID string) CommunityGalleriesClient {
- return NewCommunityGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCommunityGalleriesClientWithBaseURI creates an instance of the CommunityGalleriesClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewCommunityGalleriesClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleriesClient {
- return CommunityGalleriesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a community gallery by gallery public name.
-// Parameters:
-// location - resource location.
-// publicGalleryName - the public name of the community gallery.
-func (client CommunityGalleriesClient) Get(ctx context.Context, location string, publicGalleryName string) (result CommunityGallery, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleriesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, publicGalleryName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleriesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CommunityGalleriesClient) GetPreparer(ctx context.Context, location string, publicGalleryName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "publicGalleryName": autorest.Encode("path", publicGalleryName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CommunityGalleriesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CommunityGalleriesClient) GetResponder(resp *http.Response) (result CommunityGallery, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go
deleted file mode 100644
index ef82a37bc941..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimages.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CommunityGalleryImagesClient is the compute Client
-type CommunityGalleryImagesClient struct {
- BaseClient
-}
-
-// NewCommunityGalleryImagesClient creates an instance of the CommunityGalleryImagesClient client.
-func NewCommunityGalleryImagesClient(subscriptionID string) CommunityGalleryImagesClient {
- return NewCommunityGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCommunityGalleryImagesClientWithBaseURI creates an instance of the CommunityGalleryImagesClient client using a
-// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
-// Azure stack).
-func NewCommunityGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleryImagesClient {
- return CommunityGalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a community gallery image.
-// Parameters:
-// location - resource location.
-// publicGalleryName - the public name of the community gallery.
-// galleryImageName - the name of the community gallery image definition.
-func (client CommunityGalleryImagesClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string) (result CommunityGalleryImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleryImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, publicGalleryName, galleryImageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CommunityGalleryImagesClient) GetPreparer(ctx context.Context, location string, publicGalleryName string, galleryImageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "location": autorest.Encode("path", location),
- "publicGalleryName": autorest.Encode("path", publicGalleryName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CommunityGalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CommunityGalleryImagesClient) GetResponder(resp *http.Response) (result CommunityGalleryImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go
deleted file mode 100644
index 03d67523dff7..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/communitygalleryimageversions.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// CommunityGalleryImageVersionsClient is the compute Client
-type CommunityGalleryImageVersionsClient struct {
- BaseClient
-}
-
-// NewCommunityGalleryImageVersionsClient creates an instance of the CommunityGalleryImageVersionsClient client.
-func NewCommunityGalleryImageVersionsClient(subscriptionID string) CommunityGalleryImageVersionsClient {
- return NewCommunityGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewCommunityGalleryImageVersionsClientWithBaseURI creates an instance of the CommunityGalleryImageVersionsClient
-// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
-// (sovereign clouds, Azure stack).
-func NewCommunityGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) CommunityGalleryImageVersionsClient {
- return CommunityGalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a community gallery image version.
-// Parameters:
-// location - resource location.
-// publicGalleryName - the public name of the community gallery.
-// galleryImageName - the name of the community gallery image definition.
-// galleryImageVersionName - the name of the community gallery image version. Needs to follow semantic version
-// name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit
-// integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-func (client CommunityGalleryImageVersionsClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string) (result CommunityGalleryImageVersion, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CommunityGalleryImageVersionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, publicGalleryName, galleryImageName, galleryImageVersionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CommunityGalleryImageVersionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client CommunityGalleryImageVersionsClient) GetPreparer(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "location": autorest.Encode("path", location),
- "publicGalleryName": autorest.Encode("path", publicGalleryName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client CommunityGalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client CommunityGalleryImageVersionsClient) GetResponder(resp *http.Response) (result CommunityGalleryImageVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go
deleted file mode 100644
index 67ace3df4d8e..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhostgroups.go
+++ /dev/null
@@ -1,589 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DedicatedHostGroupsClient is the compute Client
-type DedicatedHostGroupsClient struct {
- BaseClient
-}
-
-// NewDedicatedHostGroupsClient creates an instance of the DedicatedHostGroupsClient client.
-func NewDedicatedHostGroupsClient(subscriptionID string) DedicatedHostGroupsClient {
- return NewDedicatedHostGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDedicatedHostGroupsClientWithBaseURI creates an instance of the DedicatedHostGroupsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewDedicatedHostGroupsClientWithBaseURI(baseURI string, subscriptionID string) DedicatedHostGroupsClient {
- return DedicatedHostGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a dedicated host group. For details of Dedicated Host and Dedicated Host Groups
-// please see [Dedicated Host Documentation] (https://go.microsoft.com/fwlink/?linkid=2082596)
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// parameters - parameters supplied to the Create Dedicated Host Group.
-func (client DedicatedHostGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroup) (result DedicatedHostGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}},
- }}}}}); err != nil {
- return result, validation.NewError("compute.DedicatedHostGroupsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, hostGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "CreateOrUpdate", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DedicatedHostGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroup) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result DedicatedHostGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a dedicated host group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-func (client DedicatedHostGroupsClient) Delete(ctx context.Context, resourceGroupName string, hostGroupName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.Delete")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, hostGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.DeleteSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Delete", resp, "Failure sending request")
- return
- }
-
- result, err = client.DeleteResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Delete", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DedicatedHostGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, hostGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a dedicated host group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance
-// views of the dedicated hosts under the dedicated host group. 'UserData' is not supported for dedicated host
-// group.
-func (client DedicatedHostGroupsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, expand InstanceViewTypes) (result DedicatedHostGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, hostGroupName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DedicatedHostGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, hostGroupName string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) GetResponder(resp *http.Response) (result DedicatedHostGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByResourceGroup lists all of the dedicated host groups in the specified resource group. Use the nextLink
-// property in the response to get the next page of dedicated host groups.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client DedicatedHostGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DedicatedHostGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.dhglr.Response.Response != nil {
- sc = result.dhglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.dhglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.dhglr, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.dhglr.hasNextLink() && result.dhglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client DedicatedHostGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result DedicatedHostGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client DedicatedHostGroupsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DedicatedHostGroupListResult) (result DedicatedHostGroupListResult, err error) {
- req, err := lastResults.dedicatedHostGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DedicatedHostGroupsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DedicatedHostGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// ListBySubscription lists all of the dedicated host groups in the subscription. Use the nextLink property in the
-// response to get the next page of dedicated host groups.
-func (client DedicatedHostGroupsClient) ListBySubscription(ctx context.Context) (result DedicatedHostGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.dhglr.Response.Response != nil {
- sc = result.dhglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listBySubscriptionNextResults
- req, err := client.ListBySubscriptionPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListBySubscription", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.dhglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListBySubscription", resp, "Failure sending request")
- return
- }
-
- result.dhglr, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "ListBySubscription", resp, "Failure responding to request")
- return
- }
- if result.dhglr.hasNextLink() && result.dhglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListBySubscriptionPreparer prepares the ListBySubscription request.
-func (client DedicatedHostGroupsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result DedicatedHostGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client DedicatedHostGroupsClient) listBySubscriptionNextResults(ctx context.Context, lastResults DedicatedHostGroupListResult) (result DedicatedHostGroupListResult, err error) {
- req, err := lastResults.dedicatedHostGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DedicatedHostGroupsClient) ListBySubscriptionComplete(ctx context.Context) (result DedicatedHostGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListBySubscription(ctx)
- return
-}
-
-// Update updates a dedicated host group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// parameters - parameters supplied to the Update Dedicated Host Group operation.
-func (client DedicatedHostGroupsClient) Update(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroupUpdate) (result DedicatedHostGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupsClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, hostGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostGroupsClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client DedicatedHostGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroupUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostGroupsClient) UpdateResponder(resp *http.Response) (result DedicatedHostGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
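
# Editor's note (not part of the patch): for context on the generated client removed above, here is a minimal,
# illustrative sketch of how the DedicatedHostGroupsClient.Get call documented in this file was typically consumed.
# The subscription/resource-group/host-group names, the AZURE_SUBSCRIPTION_ID environment variable, and the
# authorizer wiring are all assumptions for the example, not part of this change.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder subscription ID read from the environment.
	client := compute.NewDedicatedHostGroupsClient(os.Getenv("AZURE_SUBSCRIPTION_ID"))

	// Assumes the usual AZURE_* credentials environment variables are set.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// An empty expand value skips instance views; this keeps the sketch independent of the exact
	// InstanceViewTypes constant name, which differs between SDK versions.
	group, err := client.Get(context.Background(), "my-rg", "my-host-group", compute.InstanceViewTypes(""))
	if err != nil {
		panic(err)
	}
	fmt.Println(*group.ID)
}
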
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go
deleted file mode 100644
index a58e6b2b84bb..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/dedicatedhosts.go
+++ /dev/null
@@ -1,492 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DedicatedHostsClient is the compute Client
-type DedicatedHostsClient struct {
- BaseClient
-}
-
-// NewDedicatedHostsClient creates an instance of the DedicatedHostsClient client.
-func NewDedicatedHostsClient(subscriptionID string) DedicatedHostsClient {
- return NewDedicatedHostsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDedicatedHostsClientWithBaseURI creates an instance of the DedicatedHostsClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewDedicatedHostsClientWithBaseURI(baseURI string, subscriptionID string) DedicatedHostsClient {
- return DedicatedHostsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate creates or updates a dedicated host.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// hostName - the name of the dedicated host.
-// parameters - parameters supplied to the Create Dedicated Host operation.
-func (client DedicatedHostsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost) (result DedicatedHostsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.DedicatedHostProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}},
- }},
- {Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.DedicatedHostsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, hostGroupName, hostName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DedicatedHostsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "hostName": autorest.Encode("path", hostName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostsClient) CreateOrUpdateSender(req *http.Request) (future DedicatedHostsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostsClient) CreateOrUpdateResponder(resp *http.Response) (result DedicatedHost, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a dedicated host.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// hostName - the name of the dedicated host.
-func (client DedicatedHostsClient) Delete(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string) (result DedicatedHostsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, hostGroupName, hostName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DedicatedHostsClient) DeletePreparer(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "hostName": autorest.Encode("path", hostName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostsClient) DeleteSender(req *http.Request) (future DedicatedHostsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a dedicated host.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// hostName - the name of the dedicated host.
-// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance
-// views of the dedicated host. 'UserData' is not supported for dedicated host.
-func (client DedicatedHostsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, expand InstanceViewTypes) (result DedicatedHost, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, hostGroupName, hostName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DedicatedHostsClient) GetPreparer(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "hostName": autorest.Encode("path", hostName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostsClient) GetResponder(resp *http.Response) (result DedicatedHost, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByHostGroup lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property in
-// the response to get the next page of dedicated hosts.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-func (client DedicatedHostsClient) ListByHostGroup(ctx context.Context, resourceGroupName string, hostGroupName string) (result DedicatedHostListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.ListByHostGroup")
- defer func() {
- sc := -1
- if result.dhlr.Response.Response != nil {
- sc = result.dhlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByHostGroupNextResults
- req, err := client.ListByHostGroupPreparer(ctx, resourceGroupName, hostGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "ListByHostGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByHostGroupSender(req)
- if err != nil {
- result.dhlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "ListByHostGroup", resp, "Failure sending request")
- return
- }
-
- result.dhlr, err = client.ListByHostGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "ListByHostGroup", resp, "Failure responding to request")
- return
- }
- if result.dhlr.hasNextLink() && result.dhlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByHostGroupPreparer prepares the ListByHostGroup request.
-func (client DedicatedHostsClient) ListByHostGroupPreparer(ctx context.Context, resourceGroupName string, hostGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByHostGroupSender sends the ListByHostGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostsClient) ListByHostGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByHostGroupResponder handles the response to the ListByHostGroup request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostsClient) ListByHostGroupResponder(resp *http.Response) (result DedicatedHostListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByHostGroupNextResults retrieves the next set of results, if any.
-func (client DedicatedHostsClient) listByHostGroupNextResults(ctx context.Context, lastResults DedicatedHostListResult) (result DedicatedHostListResult, err error) {
- req, err := lastResults.dedicatedHostListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "listByHostGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByHostGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "listByHostGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByHostGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "listByHostGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByHostGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DedicatedHostsClient) ListByHostGroupComplete(ctx context.Context, resourceGroupName string, hostGroupName string) (result DedicatedHostListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.ListByHostGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByHostGroup(ctx, resourceGroupName, hostGroupName)
- return
-}
-
-// Update updates a dedicated host.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// hostGroupName - the name of the dedicated host group.
-// hostName - the name of the dedicated host.
-// parameters - parameters supplied to the Update Dedicated Host operation.
-func (client DedicatedHostsClient) Update(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate) (result DedicatedHostsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, hostGroupName, hostName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client DedicatedHostsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hostGroupName": autorest.Encode("path", hostGroupName),
- "hostName": autorest.Encode("path", hostName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client DedicatedHostsClient) UpdateSender(req *http.Request) (future DedicatedHostsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client DedicatedHostsClient) UpdateResponder(resp *http.Response) (result DedicatedHost, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
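
# Editor's note (not part of the patch): the paged ListByHostGroup helpers removed above follow the standard
# autorest page/iterator pattern. A hedged sketch of walking every host in a group is shown below; it assumes a
# compute.DedicatedHostsClient constructed and authorized as in the previous sketch, and the function name and
# arguments are illustrative.

// Assumes the same imports and client wiring as the sketch above; only the listing logic differs.
func allHostNames(ctx context.Context, client compute.DedicatedHostsClient, resourceGroup, hostGroupName string) ([]string, error) {
	var names []string

	// ListByHostGroupComplete follows the nextLink across pages automatically.
	it, err := client.ListByHostGroupComplete(ctx, resourceGroup, hostGroupName)
	if err != nil {
		return nil, err
	}
	for it.NotDone() {
		if host := it.Value(); host.Name != nil {
			names = append(names, *host.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}
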
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go
deleted file mode 100644
index 618db0d550a5..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskaccesses.go
+++ /dev/null
@@ -1,1045 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DiskAccessesClient is the compute Client
-type DiskAccessesClient struct {
- BaseClient
-}
-
-// NewDiskAccessesClient creates an instance of the DiskAccessesClient client.
-func NewDiskAccessesClient(subscriptionID string) DiskAccessesClient {
- return NewDiskAccessesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDiskAccessesClientWithBaseURI creates an instance of the DiskAccessesClient client using a custom endpoint. Use
-// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewDiskAccessesClientWithBaseURI(baseURI string, subscriptionID string) DiskAccessesClient {
- return DiskAccessesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate creates or updates a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// diskAccess - disk access object supplied in the body of the Put disk access operation.
-func (client DiskAccessesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess) (result DiskAccessesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskAccessName, diskAccess)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DiskAccessesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", pathParameters),
- autorest.WithJSON(diskAccess),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) CreateOrUpdateSender(req *http.Request) (future DiskAccessesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) CreateOrUpdateResponder(resp *http.Response) (result DiskAccess, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskAccessesClient) Delete(ctx context.Context, resourceGroupName string, diskAccessName string) (result DiskAccessesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, diskAccessName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DiskAccessesClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskAccessName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) DeleteSender(req *http.Request) (future DiskAccessesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// DeleteAPrivateEndpointConnection deletes a private endpoint connection under a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// privateEndpointConnectionName - the name of the private endpoint connection.
-func (client DiskAccessesClient) DeleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (result DiskAccessesDeleteAPrivateEndpointConnectionFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.DeleteAPrivateEndpointConnection")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeleteAPrivateEndpointConnectionPreparer(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "DeleteAPrivateEndpointConnection", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteAPrivateEndpointConnectionSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "DeleteAPrivateEndpointConnection", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeleteAPrivateEndpointConnectionPreparer prepares the DeleteAPrivateEndpointConnection request.
-func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionPreparer(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteAPrivateEndpointConnectionSender sends the DeleteAPrivateEndpointConnection request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionSender(req *http.Request) (future DiskAccessesDeleteAPrivateEndpointConnectionFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteAPrivateEndpointConnectionResponder handles the response to the DeleteAPrivateEndpointConnection request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) DeleteAPrivateEndpointConnectionResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets information about a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskAccessesClient) Get(ctx context.Context, resourceGroupName string, diskAccessName string) (result DiskAccess, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, diskAccessName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DiskAccessesClient) GetPreparer(ctx context.Context, resourceGroupName string, diskAccessName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) GetResponder(resp *http.Response) (result DiskAccess, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetAPrivateEndpointConnection gets information about a private endpoint connection under a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// privateEndpointConnectionName - the name of the private endpoint connection.
-func (client DiskAccessesClient) GetAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.GetAPrivateEndpointConnection")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetAPrivateEndpointConnectionPreparer(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetAPrivateEndpointConnection", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetAPrivateEndpointConnectionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetAPrivateEndpointConnection", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetAPrivateEndpointConnectionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetAPrivateEndpointConnection", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetAPrivateEndpointConnectionPreparer prepares the GetAPrivateEndpointConnection request.
-func (client DiskAccessesClient) GetAPrivateEndpointConnectionPreparer(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetAPrivateEndpointConnectionSender sends the GetAPrivateEndpointConnection request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) GetAPrivateEndpointConnectionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetAPrivateEndpointConnectionResponder handles the response to the GetAPrivateEndpointConnection request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) GetAPrivateEndpointConnectionResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetPrivateLinkResources gets the private link resources possible under a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskAccessesClient) GetPrivateLinkResources(ctx context.Context, resourceGroupName string, diskAccessName string) (result PrivateLinkResourceListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.GetPrivateLinkResources")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPrivateLinkResourcesPreparer(ctx, resourceGroupName, diskAccessName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetPrivateLinkResources", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetPrivateLinkResourcesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetPrivateLinkResources", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetPrivateLinkResourcesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "GetPrivateLinkResources", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPrivateLinkResourcesPreparer prepares the GetPrivateLinkResources request.
-func (client DiskAccessesClient) GetPrivateLinkResourcesPreparer(ctx context.Context, resourceGroupName string, diskAccessName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetPrivateLinkResourcesSender sends the GetPrivateLinkResources request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) GetPrivateLinkResourcesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetPrivateLinkResourcesResponder handles the response to the GetPrivateLinkResources request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) GetPrivateLinkResourcesResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all the disk access resources under a subscription.
-func (client DiskAccessesClient) List(ctx context.Context) (result DiskAccessListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.List")
- defer func() {
- sc := -1
- if result.dal.Response.Response != nil {
- sc = result.dal.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.dal.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.dal, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.dal.hasNextLink() && result.dal.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client DiskAccessesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) ListResponder(resp *http.Response) (result DiskAccessList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client DiskAccessesClient) listNextResults(ctx context.Context, lastResults DiskAccessList) (result DiskAccessList, err error) {
- req, err := lastResults.diskAccessListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskAccessesClient) ListComplete(ctx context.Context) (result DiskAccessListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListByResourceGroup lists all the disk access resources under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client DiskAccessesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskAccessListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.dal.Response.Response != nil {
- sc = result.dal.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.dal.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.dal, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.dal.hasNextLink() && result.dal.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client DiskAccessesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) ListByResourceGroupResponder(resp *http.Response) (result DiskAccessList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client DiskAccessesClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskAccessList) (result DiskAccessList, err error) {
- req, err := lastResults.diskAccessListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskAccessesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskAccessListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// ListPrivateEndpointConnections lists information about private endpoint connections under a disk access resource
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskAccessesClient) ListPrivateEndpointConnections(ctx context.Context, resourceGroupName string, diskAccessName string) (result PrivateEndpointConnectionListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.ListPrivateEndpointConnections")
- defer func() {
- sc := -1
- if result.peclr.Response.Response != nil {
- sc = result.peclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listPrivateEndpointConnectionsNextResults
- req, err := client.ListPrivateEndpointConnectionsPreparer(ctx, resourceGroupName, diskAccessName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListPrivateEndpointConnections", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListPrivateEndpointConnectionsSender(req)
- if err != nil {
- result.peclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListPrivateEndpointConnections", resp, "Failure sending request")
- return
- }
-
- result.peclr, err = client.ListPrivateEndpointConnectionsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "ListPrivateEndpointConnections", resp, "Failure responding to request")
- return
- }
- if result.peclr.hasNextLink() && result.peclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPrivateEndpointConnectionsPreparer prepares the ListPrivateEndpointConnections request.
-func (client DiskAccessesClient) ListPrivateEndpointConnectionsPreparer(ctx context.Context, resourceGroupName string, diskAccessName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListPrivateEndpointConnectionsSender sends the ListPrivateEndpointConnections request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) ListPrivateEndpointConnectionsSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListPrivateEndpointConnectionsResponder handles the response to the ListPrivateEndpointConnections request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) ListPrivateEndpointConnectionsResponder(resp *http.Response) (result PrivateEndpointConnectionListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listPrivateEndpointConnectionsNextResults retrieves the next set of results, if any.
-func (client DiskAccessesClient) listPrivateEndpointConnectionsNextResults(ctx context.Context, lastResults PrivateEndpointConnectionListResult) (result PrivateEndpointConnectionListResult, err error) {
- req, err := lastResults.privateEndpointConnectionListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listPrivateEndpointConnectionsNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListPrivateEndpointConnectionsSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listPrivateEndpointConnectionsNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListPrivateEndpointConnectionsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "listPrivateEndpointConnectionsNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListPrivateEndpointConnectionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskAccessesClient) ListPrivateEndpointConnectionsComplete(ctx context.Context, resourceGroupName string, diskAccessName string) (result PrivateEndpointConnectionListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.ListPrivateEndpointConnections")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListPrivateEndpointConnections(ctx, resourceGroupName, diskAccessName)
- return
-}
-
-// Update updates (patches) a disk access resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// diskAccess - disk access object supplied in the body of the Patch disk access operation.
-func (client DiskAccessesClient) Update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate) (result DiskAccessesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, diskAccessName, diskAccess)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client DiskAccessesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", pathParameters),
- autorest.WithJSON(diskAccess),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) UpdateSender(req *http.Request) (future DiskAccessesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) UpdateResponder(resp *http.Response) (result DiskAccess, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// UpdateAPrivateEndpointConnection approves or rejects a private endpoint connection under a disk access resource; this
-// can't be used to create a new private endpoint connection.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskAccessName - the name of the disk access resource that is being created. The name can't be changed after
-// the disk access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// privateEndpointConnectionName - the name of the private endpoint connection.
-// privateEndpointConnection - private endpoint connection object supplied in the body of the Put private
-// endpoint connection operation.
-func (client DiskAccessesClient) UpdateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection) (result DiskAccessesUpdateAPrivateEndpointConnectionFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessesClient.UpdateAPrivateEndpointConnection")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: privateEndpointConnection,
- Constraints: []validation.Constraint{{Target: "privateEndpointConnection.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "privateEndpointConnection.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.DiskAccessesClient", "UpdateAPrivateEndpointConnection", err.Error())
- }
-
- req, err := client.UpdateAPrivateEndpointConnectionPreparer(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, privateEndpointConnection)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "UpdateAPrivateEndpointConnection", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateAPrivateEndpointConnectionSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesClient", "UpdateAPrivateEndpointConnection", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdateAPrivateEndpointConnectionPreparer prepares the UpdateAPrivateEndpointConnection request.
-func (client DiskAccessesClient) UpdateAPrivateEndpointConnectionPreparer(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskAccessName": autorest.Encode("path", diskAccessName),
- "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- privateEndpointConnection.ID = nil
- privateEndpointConnection.Name = nil
- privateEndpointConnection.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
- autorest.WithJSON(privateEndpointConnection),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateAPrivateEndpointConnectionSender sends the UpdateAPrivateEndpointConnection request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskAccessesClient) UpdateAPrivateEndpointConnectionSender(req *http.Request) (future DiskAccessesUpdateAPrivateEndpointConnectionFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateAPrivateEndpointConnectionResponder handles the response to the UpdateAPrivateEndpointConnection request. The method always
-// closes the http.Response Body.
-func (client DiskAccessesClient) UpdateAPrivateEndpointConnectionResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go
deleted file mode 100644
index af979a6e8909..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskencryptionsets.go
+++ /dev/null
@@ -1,719 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DiskEncryptionSetsClient is the compute Client
-type DiskEncryptionSetsClient struct {
- BaseClient
-}
-
-// NewDiskEncryptionSetsClient creates an instance of the DiskEncryptionSetsClient client.
-func NewDiskEncryptionSetsClient(subscriptionID string) DiskEncryptionSetsClient {
- return NewDiskEncryptionSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDiskEncryptionSetsClientWithBaseURI creates an instance of the DiskEncryptionSetsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewDiskEncryptionSetsClientWithBaseURI(baseURI string, subscriptionID string) DiskEncryptionSetsClient {
- return DiskEncryptionSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate creates or updates a disk encryption set
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed
-// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// diskEncryptionSet - disk encryption set object supplied in the body of the Put disk encryption set
-// operation.
-func (client DiskEncryptionSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet) (result DiskEncryptionSetsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: diskEncryptionSet,
- Constraints: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties.ActiveKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "diskEncryptionSet.EncryptionSetProperties.ActiveKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}}},
- }}}}}); err != nil {
- return result, validation.NewError("compute.DiskEncryptionSetsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DiskEncryptionSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters),
- autorest.WithJSON(diskEncryptionSet),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) CreateOrUpdateSender(req *http.Request) (future DiskEncryptionSetsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) CreateOrUpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a disk encryption set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed
-// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskEncryptionSetsClient) Delete(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSetsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, diskEncryptionSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DiskEncryptionSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) DeleteSender(req *http.Request) (future DiskEncryptionSetsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets information about a disk encryption set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed
-// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskEncryptionSetsClient) Get(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result DiskEncryptionSet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, diskEncryptionSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DiskEncryptionSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) GetResponder(resp *http.Response) (result DiskEncryptionSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all the disk encryption sets under a subscription.
-func (client DiskEncryptionSetsClient) List(ctx context.Context) (result DiskEncryptionSetListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.List")
- defer func() {
- sc := -1
- if result.desl.Response.Response != nil {
- sc = result.desl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.desl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.desl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.desl.hasNextLink() && result.desl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client DiskEncryptionSetsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) ListResponder(resp *http.Response) (result DiskEncryptionSetList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client DiskEncryptionSetsClient) listNextResults(ctx context.Context, lastResults DiskEncryptionSetList) (result DiskEncryptionSetList, err error) {
- req, err := lastResults.diskEncryptionSetListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskEncryptionSetsClient) ListComplete(ctx context.Context) (result DiskEncryptionSetListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListAssociatedResources lists all resources that are encrypted with this disk encryption set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed
-// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-func (client DiskEncryptionSetsClient) ListAssociatedResources(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result ResourceURIListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListAssociatedResources")
- defer func() {
- sc := -1
- if result.rul.Response.Response != nil {
- sc = result.rul.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listAssociatedResourcesNextResults
- req, err := client.ListAssociatedResourcesPreparer(ctx, resourceGroupName, diskEncryptionSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListAssociatedResources", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAssociatedResourcesSender(req)
- if err != nil {
- result.rul.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListAssociatedResources", resp, "Failure sending request")
- return
- }
-
- result.rul, err = client.ListAssociatedResourcesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListAssociatedResources", resp, "Failure responding to request")
- return
- }
- if result.rul.hasNextLink() && result.rul.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListAssociatedResourcesPreparer prepares the ListAssociatedResources request.
-func (client DiskEncryptionSetsClient) ListAssociatedResourcesPreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAssociatedResourcesSender sends the ListAssociatedResources request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) ListAssociatedResourcesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAssociatedResourcesResponder handles the response to the ListAssociatedResources request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) ListAssociatedResourcesResponder(resp *http.Response) (result ResourceURIList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listAssociatedResourcesNextResults retrieves the next set of results, if any.
-func (client DiskEncryptionSetsClient) listAssociatedResourcesNextResults(ctx context.Context, lastResults ResourceURIList) (result ResourceURIList, err error) {
- req, err := lastResults.resourceURIListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listAssociatedResourcesNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListAssociatedResourcesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listAssociatedResourcesNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListAssociatedResourcesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listAssociatedResourcesNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListAssociatedResourcesComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskEncryptionSetsClient) ListAssociatedResourcesComplete(ctx context.Context, resourceGroupName string, diskEncryptionSetName string) (result ResourceURIListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListAssociatedResources")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListAssociatedResources(ctx, resourceGroupName, diskEncryptionSetName)
- return
-}
-
-// ListByResourceGroup lists all the disk encryption sets under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client DiskEncryptionSetsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskEncryptionSetListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.desl.Response.Response != nil {
- sc = result.desl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.desl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.desl, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.desl.hasNextLink() && result.desl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client DiskEncryptionSetsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) ListByResourceGroupResponder(resp *http.Response) (result DiskEncryptionSetList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client DiskEncryptionSetsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskEncryptionSetList) (result DiskEncryptionSetList, err error) {
- req, err := lastResults.diskEncryptionSetListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskEncryptionSetsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskEncryptionSetListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// Update updates (patches) a disk encryption set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskEncryptionSetName - the name of the disk encryption set that is being created. The name can't be changed
-// after the disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
-// maximum name length is 80 characters.
-// diskEncryptionSet - disk encryption set object supplied in the body of the Patch disk encryption set
-// operation.
-func (client DiskEncryptionSetsClient) Update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate) (result DiskEncryptionSetsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client DiskEncryptionSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskEncryptionSetName": autorest.Encode("path", diskEncryptionSetName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", pathParameters),
- autorest.WithJSON(diskEncryptionSet),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskEncryptionSetsClient) UpdateSender(req *http.Request) (future DiskEncryptionSetsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client DiskEncryptionSetsClient) UpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go
deleted file mode 100644
index d1fe4a66bc2e..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/diskrestorepoint.go
+++ /dev/null
@@ -1,407 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DiskRestorePointClient is the compute Client
-type DiskRestorePointClient struct {
- BaseClient
-}
-
-// NewDiskRestorePointClient creates an instance of the DiskRestorePointClient client.
-func NewDiskRestorePointClient(subscriptionID string) DiskRestorePointClient {
- return NewDiskRestorePointClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDiskRestorePointClientWithBaseURI creates an instance of the DiskRestorePointClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewDiskRestorePointClientWithBaseURI(baseURI string, subscriptionID string) DiskRestorePointClient {
- return DiskRestorePointClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get disk restorePoint resource
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection that the disk restore point belongs to.
-// VMRestorePointName - the name of the vm restore point that the disk restore point belongs to.
-// diskRestorePointName - the name of the disk restore point created.
-func (client DiskRestorePointClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (result DiskRestorePoint, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, VMRestorePointName, diskRestorePointName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DiskRestorePointClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskRestorePointName": autorest.Encode("path", diskRestorePointName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmRestorePointName": autorest.Encode("path", VMRestorePointName),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskRestorePointClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DiskRestorePointClient) GetResponder(resp *http.Response) (result DiskRestorePoint, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GrantAccess grants access to a diskRestorePoint.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection that the disk restore point belongs to.
-// VMRestorePointName - the name of the vm restore point that the disk restore point belongs to.
-// diskRestorePointName - the name of the disk restore point created.
-// grantAccessData - access data object supplied in the body of the get disk access operation.
-func (client DiskRestorePointClient) GrantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData) (result DiskRestorePointGrantAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.GrantAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: grantAccessData,
- Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.DiskRestorePointClient", "GrantAccess", err.Error())
- }
-
- req, err := client.GrantAccessPreparer(ctx, resourceGroupName, restorePointCollectionName, VMRestorePointName, diskRestorePointName, grantAccessData)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "GrantAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.GrantAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "GrantAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// GrantAccessPreparer prepares the GrantAccess request.
-func (client DiskRestorePointClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskRestorePointName": autorest.Encode("path", diskRestorePointName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmRestorePointName": autorest.Encode("path", VMRestorePointName),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess", pathParameters),
- autorest.WithJSON(grantAccessData),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GrantAccessSender sends the GrantAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskRestorePointClient) GrantAccessSender(req *http.Request) (future DiskRestorePointGrantAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// GrantAccessResponder handles the response to the GrantAccess request. The method always
-// closes the http.Response Body.
-func (client DiskRestorePointClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByRestorePoint lists diskRestorePoints under a vmRestorePoint.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection that the disk restore point belongs to.
-// VMRestorePointName - the name of the vm restore point that the disk restore point belongs to.
-func (client DiskRestorePointClient) ListByRestorePoint(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string) (result DiskRestorePointListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.ListByRestorePoint")
- defer func() {
- sc := -1
- if result.drpl.Response.Response != nil {
- sc = result.drpl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByRestorePointNextResults
- req, err := client.ListByRestorePointPreparer(ctx, resourceGroupName, restorePointCollectionName, VMRestorePointName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "ListByRestorePoint", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByRestorePointSender(req)
- if err != nil {
- result.drpl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "ListByRestorePoint", resp, "Failure sending request")
- return
- }
-
- result.drpl, err = client.ListByRestorePointResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "ListByRestorePoint", resp, "Failure responding to request")
- return
- }
- if result.drpl.hasNextLink() && result.drpl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByRestorePointPreparer prepares the ListByRestorePoint request.
-func (client DiskRestorePointClient) ListByRestorePointPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmRestorePointName": autorest.Encode("path", VMRestorePointName),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByRestorePointSender sends the ListByRestorePoint request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskRestorePointClient) ListByRestorePointSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByRestorePointResponder handles the response to the ListByRestorePoint request. The method always
-// closes the http.Response Body.
-func (client DiskRestorePointClient) ListByRestorePointResponder(resp *http.Response) (result DiskRestorePointList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByRestorePointNextResults retrieves the next set of results, if any.
-func (client DiskRestorePointClient) listByRestorePointNextResults(ctx context.Context, lastResults DiskRestorePointList) (result DiskRestorePointList, err error) {
- req, err := lastResults.diskRestorePointListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "listByRestorePointNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByRestorePointSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "listByRestorePointNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByRestorePointResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "listByRestorePointNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByRestorePointComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DiskRestorePointClient) ListByRestorePointComplete(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string) (result DiskRestorePointListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.ListByRestorePoint")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByRestorePoint(ctx, resourceGroupName, restorePointCollectionName, VMRestorePointName)
- return
-}
-
-// RevokeAccess revokes access to a diskRestorePoint.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection that the disk restore point belongs to.
-// VMRestorePointName - the name of the vm restore point that the disk restore point belongs to.
-// diskRestorePointName - the name of the disk restore point created.
-func (client DiskRestorePointClient) RevokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (result DiskRestorePointRevokeAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointClient.RevokeAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, restorePointCollectionName, VMRestorePointName, diskRestorePointName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "RevokeAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RevokeAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointClient", "RevokeAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RevokeAccessPreparer prepares the RevokeAccess request.
-func (client DiskRestorePointClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, VMRestorePointName string, diskRestorePointName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskRestorePointName": autorest.Encode("path", diskRestorePointName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmRestorePointName": autorest.Encode("path", VMRestorePointName),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RevokeAccessSender sends the RevokeAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client DiskRestorePointClient) RevokeAccessSender(req *http.Request) (future DiskRestorePointRevokeAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
-// closes the http.Response Body.
-func (client DiskRestorePointClient) RevokeAccessResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go
deleted file mode 100644
index 4aec8ae542ff..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/disks.go
+++ /dev/null
@@ -1,779 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// DisksClient is the compute Client
-type DisksClient struct {
- BaseClient
-}
-
-// NewDisksClient creates an instance of the DisksClient client.
-func NewDisksClient(subscriptionID string) DisksClient {
- return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewDisksClientWithBaseURI creates an instance of the DisksClient client using a custom endpoint. Use this when
-// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient {
- return DisksClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate creates or updates a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-// disk - disk object supplied in the body of the Put disk operation.
-func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (result DisksCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: disk,
- Constraints: []validation.Constraint{{Target: "disk.DiskProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.PurchasePlan", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.PurchasePlan.Publisher", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "disk.DiskProperties.PurchasePlan.Name", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "disk.DiskProperties.PurchasePlan.Product", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "disk.DiskProperties.CreationData", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
- {Target: "disk.DiskProperties.CreationData.GalleryImageReference", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.GalleryImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
- }},
- {Target: "disk.DiskProperties.EncryptionSettingsCollection", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettingsCollection.Enabled", Name: validation.Null, Rule: true, Chain: nil}}},
- }}}}}); err != nil {
- return result, validation.NewError("compute.DisksClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskName, disk)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- disk.ManagedBy = nil
- disk.ManagedByExtended = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
- autorest.WithJSON(disk),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-func (client DisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (result DisksDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, diskName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client DisksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets information about a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-func (client DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result Disk, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, diskName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GrantAccess grants access to a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-// grantAccessData - access data object supplied in the body of the get disk access operation.
-func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (result DisksGrantAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.GrantAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: grantAccessData,
- Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.DisksClient", "GrantAccess", err.Error())
- }
-
- req, err := client.GrantAccessPreparer(ctx, resourceGroupName, diskName, grantAccessData)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.GrantAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// GrantAccessPreparer prepares the GrantAccess request.
-func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters),
- autorest.WithJSON(grantAccessData),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GrantAccessSender sends the GrantAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGrantAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// GrantAccessResponder handles the response to the GrantAccess request. The method always
-// closes the http.Response Body.
-func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all the disks under a subscription.
-func (client DisksClient) List(ctx context.Context) (result DiskListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.List")
- defer func() {
- sc := -1
- if result.dl.Response.Response != nil {
- sc = result.dl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.dl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending request")
- return
- }
-
- result.dl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to request")
- return
- }
- if result.dl.hasNextLink() && result.dl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client DisksClient) listNextResults(ctx context.Context, lastResults DiskList) (result DiskList, err error) {
- req, err := lastResults.diskListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DisksClient) ListComplete(ctx context.Context) (result DiskListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListByResourceGroup lists all the disks under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client DisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.dl.Response.Response != nil {
- sc = result.dl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.dl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.dl, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.dl.hasNextLink() && result.dl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client DisksClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskList) (result DiskList, err error) {
- req, err := lastResults.diskListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client DisksClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// RevokeAccess revokes access to a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result DisksRevokeAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.RevokeAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, diskName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RevokeAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RevokeAccessPreparer prepares the RevokeAccess request.
-func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RevokeAccessSender sends the RevokeAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRevokeAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
-// closes the http.Response Body.
-func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update updates (patches) a disk.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is
-// created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80
-// characters.
-// disk - disk object supplied in the body of the Patch disk operation.
-func (client DisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (result DisksUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, diskName, disk)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "diskName": autorest.Encode("path", diskName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
- autorest.WithJSON(disk),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go
deleted file mode 100644
index 1edd7683383f..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/enums.go
+++ /dev/null
@@ -1,2042 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-// AccessLevel enumerates the values for access level.
-type AccessLevel string
-
-const (
- // AccessLevelNone ...
- AccessLevelNone AccessLevel = "None"
- // AccessLevelRead ...
- AccessLevelRead AccessLevel = "Read"
- // AccessLevelWrite ...
- AccessLevelWrite AccessLevel = "Write"
-)
-
-// PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type.
-func PossibleAccessLevelValues() []AccessLevel {
- return []AccessLevel{AccessLevelNone, AccessLevelRead, AccessLevelWrite}
-}
-
-// AggregatedReplicationState enumerates the values for aggregated replication state.
-type AggregatedReplicationState string
-
-const (
- // AggregatedReplicationStateCompleted ...
- AggregatedReplicationStateCompleted AggregatedReplicationState = "Completed"
- // AggregatedReplicationStateFailed ...
- AggregatedReplicationStateFailed AggregatedReplicationState = "Failed"
- // AggregatedReplicationStateInProgress ...
- AggregatedReplicationStateInProgress AggregatedReplicationState = "InProgress"
- // AggregatedReplicationStateUnknown ...
- AggregatedReplicationStateUnknown AggregatedReplicationState = "Unknown"
-)
-
-// PossibleAggregatedReplicationStateValues returns an array of possible values for the AggregatedReplicationState const type.
-func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState {
- return []AggregatedReplicationState{AggregatedReplicationStateCompleted, AggregatedReplicationStateFailed, AggregatedReplicationStateInProgress, AggregatedReplicationStateUnknown}
-}
-
-// AvailabilitySetSkuTypes enumerates the values for availability set sku types.
-type AvailabilitySetSkuTypes string
-
-const (
- // AvailabilitySetSkuTypesAligned ...
- AvailabilitySetSkuTypesAligned AvailabilitySetSkuTypes = "Aligned"
- // AvailabilitySetSkuTypesClassic ...
- AvailabilitySetSkuTypesClassic AvailabilitySetSkuTypes = "Classic"
-)
-
-// PossibleAvailabilitySetSkuTypesValues returns an array of possible values for the AvailabilitySetSkuTypes const type.
-func PossibleAvailabilitySetSkuTypesValues() []AvailabilitySetSkuTypes {
- return []AvailabilitySetSkuTypes{AvailabilitySetSkuTypesAligned, AvailabilitySetSkuTypesClassic}
-}
-
-// CachingTypes enumerates the values for caching types.
-type CachingTypes string
-
-const (
- // CachingTypesNone ...
- CachingTypesNone CachingTypes = "None"
- // CachingTypesReadOnly ...
- CachingTypesReadOnly CachingTypes = "ReadOnly"
- // CachingTypesReadWrite ...
- CachingTypesReadWrite CachingTypes = "ReadWrite"
-)
-
-// PossibleCachingTypesValues returns an array of possible values for the CachingTypes const type.
-func PossibleCachingTypesValues() []CachingTypes {
- return []CachingTypes{CachingTypesNone, CachingTypesReadOnly, CachingTypesReadWrite}
-}
-
-// CapacityReservationGroupInstanceViewTypes enumerates the values for capacity reservation group instance view
-// types.
-type CapacityReservationGroupInstanceViewTypes string
-
-const (
- // CapacityReservationGroupInstanceViewTypesInstanceView ...
- CapacityReservationGroupInstanceViewTypesInstanceView CapacityReservationGroupInstanceViewTypes = "instanceView"
-)
-
-// PossibleCapacityReservationGroupInstanceViewTypesValues returns an array of possible values for the CapacityReservationGroupInstanceViewTypes const type.
-func PossibleCapacityReservationGroupInstanceViewTypesValues() []CapacityReservationGroupInstanceViewTypes {
- return []CapacityReservationGroupInstanceViewTypes{CapacityReservationGroupInstanceViewTypesInstanceView}
-}
-
-// CapacityReservationInstanceViewTypes enumerates the values for capacity reservation instance view types.
-type CapacityReservationInstanceViewTypes string
-
-const (
- // CapacityReservationInstanceViewTypesInstanceView ...
- CapacityReservationInstanceViewTypesInstanceView CapacityReservationInstanceViewTypes = "instanceView"
-)
-
-// PossibleCapacityReservationInstanceViewTypesValues returns an array of possible values for the CapacityReservationInstanceViewTypes const type.
-func PossibleCapacityReservationInstanceViewTypesValues() []CapacityReservationInstanceViewTypes {
- return []CapacityReservationInstanceViewTypes{CapacityReservationInstanceViewTypesInstanceView}
-}
-
-// CloudServiceUpgradeMode enumerates the values for cloud service upgrade mode.
-type CloudServiceUpgradeMode string
-
-const (
- // CloudServiceUpgradeModeAuto ...
- CloudServiceUpgradeModeAuto CloudServiceUpgradeMode = "Auto"
- // CloudServiceUpgradeModeManual ...
- CloudServiceUpgradeModeManual CloudServiceUpgradeMode = "Manual"
- // CloudServiceUpgradeModeSimultaneous ...
- CloudServiceUpgradeModeSimultaneous CloudServiceUpgradeMode = "Simultaneous"
-)
-
-// PossibleCloudServiceUpgradeModeValues returns an array of possible values for the CloudServiceUpgradeMode const type.
-func PossibleCloudServiceUpgradeModeValues() []CloudServiceUpgradeMode {
- return []CloudServiceUpgradeMode{CloudServiceUpgradeModeAuto, CloudServiceUpgradeModeManual, CloudServiceUpgradeModeSimultaneous}
-}
-
-// ComponentNames enumerates the values for component names.
-type ComponentNames string
-
-const (
- // ComponentNamesMicrosoftWindowsShellSetup ...
- ComponentNamesMicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup"
-)
-
-// PossibleComponentNamesValues returns an array of possible values for the ComponentNames const type.
-func PossibleComponentNamesValues() []ComponentNames {
- return []ComponentNames{ComponentNamesMicrosoftWindowsShellSetup}
-}
-
-// ConsistencyModeTypes enumerates the values for consistency mode types.
-type ConsistencyModeTypes string
-
-const (
- // ConsistencyModeTypesApplicationConsistent ...
- ConsistencyModeTypesApplicationConsistent ConsistencyModeTypes = "ApplicationConsistent"
- // ConsistencyModeTypesCrashConsistent ...
- ConsistencyModeTypesCrashConsistent ConsistencyModeTypes = "CrashConsistent"
- // ConsistencyModeTypesFileSystemConsistent ...
- ConsistencyModeTypesFileSystemConsistent ConsistencyModeTypes = "FileSystemConsistent"
-)
-
-// PossibleConsistencyModeTypesValues returns an array of possible values for the ConsistencyModeTypes const type.
-func PossibleConsistencyModeTypesValues() []ConsistencyModeTypes {
- return []ConsistencyModeTypes{ConsistencyModeTypesApplicationConsistent, ConsistencyModeTypesCrashConsistent, ConsistencyModeTypesFileSystemConsistent}
-}
-
-// DedicatedHostLicenseTypes enumerates the values for dedicated host license types.
-type DedicatedHostLicenseTypes string
-
-const (
- // DedicatedHostLicenseTypesNone ...
- DedicatedHostLicenseTypesNone DedicatedHostLicenseTypes = "None"
- // DedicatedHostLicenseTypesWindowsServerHybrid ...
- DedicatedHostLicenseTypesWindowsServerHybrid DedicatedHostLicenseTypes = "Windows_Server_Hybrid"
- // DedicatedHostLicenseTypesWindowsServerPerpetual ...
- DedicatedHostLicenseTypesWindowsServerPerpetual DedicatedHostLicenseTypes = "Windows_Server_Perpetual"
-)
-
-// PossibleDedicatedHostLicenseTypesValues returns an array of possible values for the DedicatedHostLicenseTypes const type.
-func PossibleDedicatedHostLicenseTypesValues() []DedicatedHostLicenseTypes {
- return []DedicatedHostLicenseTypes{DedicatedHostLicenseTypesNone, DedicatedHostLicenseTypesWindowsServerHybrid, DedicatedHostLicenseTypesWindowsServerPerpetual}
-}
-
-// DeleteOptions enumerates the values for delete options.
-type DeleteOptions string
-
-const (
- // DeleteOptionsDelete ...
- DeleteOptionsDelete DeleteOptions = "Delete"
- // DeleteOptionsDetach ...
- DeleteOptionsDetach DeleteOptions = "Detach"
-)
-
-// PossibleDeleteOptionsValues returns an array of possible values for the DeleteOptions const type.
-func PossibleDeleteOptionsValues() []DeleteOptions {
- return []DeleteOptions{DeleteOptionsDelete, DeleteOptionsDetach}
-}
-
-// DiffDiskOptions enumerates the values for diff disk options.
-type DiffDiskOptions string
-
-const (
- // DiffDiskOptionsLocal ...
- DiffDiskOptionsLocal DiffDiskOptions = "Local"
-)
-
-// PossibleDiffDiskOptionsValues returns an array of possible values for the DiffDiskOptions const type.
-func PossibleDiffDiskOptionsValues() []DiffDiskOptions {
- return []DiffDiskOptions{DiffDiskOptionsLocal}
-}
-
-// DiffDiskPlacement enumerates the values for diff disk placement.
-type DiffDiskPlacement string
-
-const (
- // DiffDiskPlacementCacheDisk ...
- DiffDiskPlacementCacheDisk DiffDiskPlacement = "CacheDisk"
- // DiffDiskPlacementResourceDisk ...
- DiffDiskPlacementResourceDisk DiffDiskPlacement = "ResourceDisk"
-)
-
-// PossibleDiffDiskPlacementValues returns an array of possible values for the DiffDiskPlacement const type.
-func PossibleDiffDiskPlacementValues() []DiffDiskPlacement {
- return []DiffDiskPlacement{DiffDiskPlacementCacheDisk, DiffDiskPlacementResourceDisk}
-}
-
-// DiskCreateOption enumerates the values for disk create option.
-type DiskCreateOption string
-
-const (
- // DiskCreateOptionAttach Disk will be attached to a VM.
- DiskCreateOptionAttach DiskCreateOption = "Attach"
- // DiskCreateOptionCopy Create a new disk or snapshot by copying from a disk or snapshot specified by the
- // given sourceResourceId.
- DiskCreateOptionCopy DiskCreateOption = "Copy"
- // DiskCreateOptionCopyStart Create a new disk by using a deep copy process, where the resource creation is
- // considered complete only after all data has been copied from the source.
- DiskCreateOptionCopyStart DiskCreateOption = "CopyStart"
- // DiskCreateOptionEmpty Create an empty data disk of a size given by diskSizeGB.
- DiskCreateOptionEmpty DiskCreateOption = "Empty"
- // DiskCreateOptionFromImage Create a new disk from a platform image specified by the given imageReference
- // or galleryImageReference.
- DiskCreateOptionFromImage DiskCreateOption = "FromImage"
- // DiskCreateOptionImport Create a disk by importing from a blob specified by a sourceUri in a storage
- // account specified by storageAccountId.
- DiskCreateOptionImport DiskCreateOption = "Import"
- // DiskCreateOptionRestore Create a new disk by copying from a backup recovery point.
- DiskCreateOptionRestore DiskCreateOption = "Restore"
- // DiskCreateOptionUpload Create a new disk by obtaining a write token and using it to directly upload the
- // contents of the disk.
- DiskCreateOptionUpload DiskCreateOption = "Upload"
-)
-
-// PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type.
-func PossibleDiskCreateOptionValues() []DiskCreateOption {
- return []DiskCreateOption{DiskCreateOptionAttach, DiskCreateOptionCopy, DiskCreateOptionCopyStart, DiskCreateOptionEmpty, DiskCreateOptionFromImage, DiskCreateOptionImport, DiskCreateOptionRestore, DiskCreateOptionUpload}
-}
-
-// DiskCreateOptionTypes enumerates the values for disk create option types.
-type DiskCreateOptionTypes string
-
-const (
- // DiskCreateOptionTypesAttach ...
- DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach"
- // DiskCreateOptionTypesEmpty ...
- DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty"
- // DiskCreateOptionTypesFromImage ...
- DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage"
-)
-
-// PossibleDiskCreateOptionTypesValues returns an array of possible values for the DiskCreateOptionTypes const type.
-func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes {
- return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage}
-}
-
-// DiskDeleteOptionTypes enumerates the values for disk delete option types.
-type DiskDeleteOptionTypes string
-
-const (
- // DiskDeleteOptionTypesDelete ...
- DiskDeleteOptionTypesDelete DiskDeleteOptionTypes = "Delete"
- // DiskDeleteOptionTypesDetach ...
- DiskDeleteOptionTypesDetach DiskDeleteOptionTypes = "Detach"
-)
-
-// PossibleDiskDeleteOptionTypesValues returns an array of possible values for the DiskDeleteOptionTypes const type.
-func PossibleDiskDeleteOptionTypesValues() []DiskDeleteOptionTypes {
- return []DiskDeleteOptionTypes{DiskDeleteOptionTypesDelete, DiskDeleteOptionTypesDetach}
-}
-
-// DiskDetachOptionTypes enumerates the values for disk detach option types.
-type DiskDetachOptionTypes string
-
-const (
- // DiskDetachOptionTypesForceDetach ...
- DiskDetachOptionTypesForceDetach DiskDetachOptionTypes = "ForceDetach"
-)
-
-// PossibleDiskDetachOptionTypesValues returns an array of possible values for the DiskDetachOptionTypes const type.
-func PossibleDiskDetachOptionTypesValues() []DiskDetachOptionTypes {
- return []DiskDetachOptionTypes{DiskDetachOptionTypesForceDetach}
-}
-
-// DiskEncryptionSetIdentityType enumerates the values for disk encryption set identity type.
-type DiskEncryptionSetIdentityType string
-
-const (
- // DiskEncryptionSetIdentityTypeNone ...
- DiskEncryptionSetIdentityTypeNone DiskEncryptionSetIdentityType = "None"
- // DiskEncryptionSetIdentityTypeSystemAssigned ...
- DiskEncryptionSetIdentityTypeSystemAssigned DiskEncryptionSetIdentityType = "SystemAssigned"
-)
-
-// PossibleDiskEncryptionSetIdentityTypeValues returns an array of possible values for the DiskEncryptionSetIdentityType const type.
-func PossibleDiskEncryptionSetIdentityTypeValues() []DiskEncryptionSetIdentityType {
- return []DiskEncryptionSetIdentityType{DiskEncryptionSetIdentityTypeNone, DiskEncryptionSetIdentityTypeSystemAssigned}
-}
-
-// DiskEncryptionSetType enumerates the values for disk encryption set type.
-type DiskEncryptionSetType string
-
-const (
- // DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey Resource using diskEncryptionSet would be encrypted
- // at rest with Customer managed key that can be changed and revoked by a customer.
- DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey DiskEncryptionSetType = "EncryptionAtRestWithCustomerKey"
- // DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys Resource using diskEncryptionSet would
- // be encrypted at rest with two layers of encryption. One of the keys is Customer managed and the other
- // key is Platform managed.
- DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys DiskEncryptionSetType = "EncryptionAtRestWithPlatformAndCustomerKeys"
-)
-
-// PossibleDiskEncryptionSetTypeValues returns an array of possible values for the DiskEncryptionSetType const type.
-func PossibleDiskEncryptionSetTypeValues() []DiskEncryptionSetType {
- return []DiskEncryptionSetType{DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey, DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys}
-}
-
-// DiskSecurityTypes enumerates the values for disk security types.
-type DiskSecurityTypes string
-
-const (
- // DiskSecurityTypesTrustedLaunch Trusted Launch provides security features such as secure boot and virtual
- // Trusted Platform Module (vTPM)
- DiskSecurityTypesTrustedLaunch DiskSecurityTypes = "TrustedLaunch"
-)
-
-// PossibleDiskSecurityTypesValues returns an array of possible values for the DiskSecurityTypes const type.
-func PossibleDiskSecurityTypesValues() []DiskSecurityTypes {
- return []DiskSecurityTypes{DiskSecurityTypesTrustedLaunch}
-}
-
-// DiskState enumerates the values for disk state.
-type DiskState string
-
-const (
- // DiskStateActiveSAS The disk currently has an Active SAS Uri associated with it.
- DiskStateActiveSAS DiskState = "ActiveSAS"
- // DiskStateActiveSASFrozen The disk is attached to a VM in hibernated state and has an active SAS URI
- // associated with it.
- DiskStateActiveSASFrozen DiskState = "ActiveSASFrozen"
- // DiskStateActiveUpload A disk is created for upload and a write token has been issued for uploading to
- // it.
- DiskStateActiveUpload DiskState = "ActiveUpload"
- // DiskStateAttached The disk is currently attached to a running VM.
- DiskStateAttached DiskState = "Attached"
- // DiskStateFrozen The disk is attached to a VM which is in hibernated state.
- DiskStateFrozen DiskState = "Frozen"
- // DiskStateReadyToUpload A disk is ready to be created by upload by requesting a write token.
- DiskStateReadyToUpload DiskState = "ReadyToUpload"
- // DiskStateReserved The disk is attached to a stopped-deallocated VM.
- DiskStateReserved DiskState = "Reserved"
- // DiskStateUnattached The disk is not being used and can be attached to a VM.
- DiskStateUnattached DiskState = "Unattached"
-)
-
-// PossibleDiskStateValues returns an array of possible values for the DiskState const type.
-func PossibleDiskStateValues() []DiskState {
- return []DiskState{DiskStateActiveSAS, DiskStateActiveSASFrozen, DiskStateActiveUpload, DiskStateAttached, DiskStateFrozen, DiskStateReadyToUpload, DiskStateReserved, DiskStateUnattached}
-}
-
-// DiskStorageAccountTypes enumerates the values for disk storage account types.
-type DiskStorageAccountTypes string
-
-const (
- // DiskStorageAccountTypesPremiumLRS Premium SSD locally redundant storage. Best for production and
- // performance sensitive workloads.
- DiskStorageAccountTypesPremiumLRS DiskStorageAccountTypes = "Premium_LRS"
- // DiskStorageAccountTypesPremiumZRS Premium SSD zone redundant storage. Best for the production workloads
- // that need storage resiliency against zone failures.
- DiskStorageAccountTypesPremiumZRS DiskStorageAccountTypes = "Premium_ZRS"
- // DiskStorageAccountTypesStandardLRS Standard HDD locally redundant storage. Best for backup,
- // non-critical, and infrequent access.
- DiskStorageAccountTypesStandardLRS DiskStorageAccountTypes = "Standard_LRS"
- // DiskStorageAccountTypesStandardSSDLRS Standard SSD locally redundant storage. Best for web servers,
- // lightly used enterprise applications and dev/test.
- DiskStorageAccountTypesStandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS"
- // DiskStorageAccountTypesStandardSSDZRS Standard SSD zone redundant storage. Best for web servers, lightly
- // used enterprise applications and dev/test that need storage resiliency against zone failures.
- DiskStorageAccountTypesStandardSSDZRS DiskStorageAccountTypes = "StandardSSD_ZRS"
- // DiskStorageAccountTypesUltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads
- // such as SAP HANA, top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads.
- DiskStorageAccountTypesUltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS"
-)
-
-// PossibleDiskStorageAccountTypesValues returns an array of possible values for the DiskStorageAccountTypes const type.
-func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes {
- return []DiskStorageAccountTypes{DiskStorageAccountTypesPremiumLRS, DiskStorageAccountTypesPremiumZRS, DiskStorageAccountTypesStandardLRS, DiskStorageAccountTypesStandardSSDLRS, DiskStorageAccountTypesStandardSSDZRS, DiskStorageAccountTypesUltraSSDLRS}
-}
-
-// EncryptionType enumerates the values for encryption type.
-type EncryptionType string
-
-const (
- // EncryptionTypeEncryptionAtRestWithCustomerKey Disk is encrypted at rest with Customer managed key that
- // can be changed and revoked by a customer.
- EncryptionTypeEncryptionAtRestWithCustomerKey EncryptionType = "EncryptionAtRestWithCustomerKey"
- // EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys Disk is encrypted at rest with 2 layers of
- // encryption. One of the keys is Customer managed and the other key is Platform managed.
- EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys EncryptionType = "EncryptionAtRestWithPlatformAndCustomerKeys"
- // EncryptionTypeEncryptionAtRestWithPlatformKey Disk is encrypted at rest with Platform managed key. It is
- // the default encryption type. This is not a valid encryption type for disk encryption sets.
- EncryptionTypeEncryptionAtRestWithPlatformKey EncryptionType = "EncryptionAtRestWithPlatformKey"
-)
-
-// PossibleEncryptionTypeValues returns an array of possible values for the EncryptionType const type.
-func PossibleEncryptionTypeValues() []EncryptionType {
- return []EncryptionType{EncryptionTypeEncryptionAtRestWithCustomerKey, EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys, EncryptionTypeEncryptionAtRestWithPlatformKey}
-}
-
-// ExecutionState enumerates the values for execution state.
-type ExecutionState string
-
-const (
- // ExecutionStateCanceled ...
- ExecutionStateCanceled ExecutionState = "Canceled"
- // ExecutionStateFailed ...
- ExecutionStateFailed ExecutionState = "Failed"
- // ExecutionStatePending ...
- ExecutionStatePending ExecutionState = "Pending"
- // ExecutionStateRunning ...
- ExecutionStateRunning ExecutionState = "Running"
- // ExecutionStateSucceeded ...
- ExecutionStateSucceeded ExecutionState = "Succeeded"
- // ExecutionStateTimedOut ...
- ExecutionStateTimedOut ExecutionState = "TimedOut"
- // ExecutionStateUnknown ...
- ExecutionStateUnknown ExecutionState = "Unknown"
-)
-
-// PossibleExecutionStateValues returns an array of possible values for the ExecutionState const type.
-func PossibleExecutionStateValues() []ExecutionState {
- return []ExecutionState{ExecutionStateCanceled, ExecutionStateFailed, ExecutionStatePending, ExecutionStateRunning, ExecutionStateSucceeded, ExecutionStateTimedOut, ExecutionStateUnknown}
-}
-
-// ExpandTypesForGetCapacityReservationGroups enumerates the values for expand types for get capacity
-// reservation groups.
-type ExpandTypesForGetCapacityReservationGroups string
-
-const (
- // ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ...
- ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref ExpandTypesForGetCapacityReservationGroups = "virtualMachineScaleSetVMs/$ref"
- // ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ...
- ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref ExpandTypesForGetCapacityReservationGroups = "virtualMachines/$ref"
-)
-
-// PossibleExpandTypesForGetCapacityReservationGroupsValues returns an array of possible values for the ExpandTypesForGetCapacityReservationGroups const type.
-func PossibleExpandTypesForGetCapacityReservationGroupsValues() []ExpandTypesForGetCapacityReservationGroups {
- return []ExpandTypesForGetCapacityReservationGroups{ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsref, ExpandTypesForGetCapacityReservationGroupsVirtualMachinesref}
-}
-
-// ExpandTypesForGetVMScaleSets enumerates the values for expand types for get vm scale sets.
-type ExpandTypesForGetVMScaleSets string
-
-const (
- // ExpandTypesForGetVMScaleSetsUserData ...
- ExpandTypesForGetVMScaleSetsUserData ExpandTypesForGetVMScaleSets = "userData"
-)
-
-// PossibleExpandTypesForGetVMScaleSetsValues returns an array of possible values for the ExpandTypesForGetVMScaleSets const type.
-func PossibleExpandTypesForGetVMScaleSetsValues() []ExpandTypesForGetVMScaleSets {
- return []ExpandTypesForGetVMScaleSets{ExpandTypesForGetVMScaleSetsUserData}
-}
-
-// ExtendedLocationType enumerates the values for extended location type.
-type ExtendedLocationType string
-
-const (
- // ExtendedLocationTypeEdgeZone ...
- ExtendedLocationTypeEdgeZone ExtendedLocationType = "EdgeZone"
-)
-
-// PossibleExtendedLocationTypeValues returns an array of possible values for the ExtendedLocationType const type.
-func PossibleExtendedLocationTypeValues() []ExtendedLocationType {
- return []ExtendedLocationType{ExtendedLocationTypeEdgeZone}
-}
-
-// ExtendedLocationTypes enumerates the values for extended location types.
-type ExtendedLocationTypes string
-
-const (
- // ExtendedLocationTypesEdgeZone ...
- ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone"
-)
-
-// PossibleExtendedLocationTypesValues returns an array of possible values for the ExtendedLocationTypes const type.
-func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes {
- return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone}
-}
-
-// GallerySharingPermissionTypes enumerates the values for gallery sharing permission types.
-type GallerySharingPermissionTypes string
-
-const (
- // GallerySharingPermissionTypesGroups ...
- GallerySharingPermissionTypesGroups GallerySharingPermissionTypes = "Groups"
- // GallerySharingPermissionTypesPrivate ...
- GallerySharingPermissionTypesPrivate GallerySharingPermissionTypes = "Private"
-)
-
-// PossibleGallerySharingPermissionTypesValues returns an array of possible values for the GallerySharingPermissionTypes const type.
-func PossibleGallerySharingPermissionTypesValues() []GallerySharingPermissionTypes {
- return []GallerySharingPermissionTypes{GallerySharingPermissionTypesGroups, GallerySharingPermissionTypesPrivate}
-}
-
-// HostCaching enumerates the values for host caching.
-type HostCaching string
-
-const (
- // HostCachingNone ...
- HostCachingNone HostCaching = "None"
- // HostCachingReadOnly ...
- HostCachingReadOnly HostCaching = "ReadOnly"
- // HostCachingReadWrite ...
- HostCachingReadWrite HostCaching = "ReadWrite"
-)
-
-// PossibleHostCachingValues returns an array of possible values for the HostCaching const type.
-func PossibleHostCachingValues() []HostCaching {
- return []HostCaching{HostCachingNone, HostCachingReadOnly, HostCachingReadWrite}
-}
-
-// HyperVGeneration enumerates the values for hyper v generation.
-type HyperVGeneration string
-
-const (
- // HyperVGenerationV1 ...
- HyperVGenerationV1 HyperVGeneration = "V1"
- // HyperVGenerationV2 ...
- HyperVGenerationV2 HyperVGeneration = "V2"
-)
-
-// PossibleHyperVGenerationValues returns an array of possible values for the HyperVGeneration const type.
-func PossibleHyperVGenerationValues() []HyperVGeneration {
- return []HyperVGeneration{HyperVGenerationV1, HyperVGenerationV2}
-}
-
-// HyperVGenerationType enumerates the values for hyper v generation type.
-type HyperVGenerationType string
-
-const (
- // HyperVGenerationTypeV1 ...
- HyperVGenerationTypeV1 HyperVGenerationType = "V1"
- // HyperVGenerationTypeV2 ...
- HyperVGenerationTypeV2 HyperVGenerationType = "V2"
-)
-
-// PossibleHyperVGenerationTypeValues returns an array of possible values for the HyperVGenerationType const type.
-func PossibleHyperVGenerationTypeValues() []HyperVGenerationType {
- return []HyperVGenerationType{HyperVGenerationTypeV1, HyperVGenerationTypeV2}
-}
-
-// HyperVGenerationTypes enumerates the values for hyper v generation types.
-type HyperVGenerationTypes string
-
-const (
- // HyperVGenerationTypesV1 ...
- HyperVGenerationTypesV1 HyperVGenerationTypes = "V1"
- // HyperVGenerationTypesV2 ...
- HyperVGenerationTypesV2 HyperVGenerationTypes = "V2"
-)
-
-// PossibleHyperVGenerationTypesValues returns an array of possible values for the HyperVGenerationTypes const type.
-func PossibleHyperVGenerationTypesValues() []HyperVGenerationTypes {
- return []HyperVGenerationTypes{HyperVGenerationTypesV1, HyperVGenerationTypesV2}
-}
-
-// InstanceViewTypes enumerates the values for instance view types.
-type InstanceViewTypes string
-
-const (
- // InstanceViewTypesInstanceView ...
- InstanceViewTypesInstanceView InstanceViewTypes = "instanceView"
- // InstanceViewTypesUserData ...
- InstanceViewTypesUserData InstanceViewTypes = "userData"
-)
-
-// PossibleInstanceViewTypesValues returns an array of possible values for the InstanceViewTypes const type.
-func PossibleInstanceViewTypesValues() []InstanceViewTypes {
- return []InstanceViewTypes{InstanceViewTypesInstanceView, InstanceViewTypesUserData}
-}
-
-// IntervalInMins enumerates the values for interval in mins.
-type IntervalInMins string
-
-const (
- // IntervalInMinsFiveMins ...
- IntervalInMinsFiveMins IntervalInMins = "FiveMins"
- // IntervalInMinsSixtyMins ...
- IntervalInMinsSixtyMins IntervalInMins = "SixtyMins"
- // IntervalInMinsThirtyMins ...
- IntervalInMinsThirtyMins IntervalInMins = "ThirtyMins"
- // IntervalInMinsThreeMins ...
- IntervalInMinsThreeMins IntervalInMins = "ThreeMins"
-)
-
-// PossibleIntervalInMinsValues returns an array of possible values for the IntervalInMins const type.
-func PossibleIntervalInMinsValues() []IntervalInMins {
- return []IntervalInMins{IntervalInMinsFiveMins, IntervalInMinsSixtyMins, IntervalInMinsThirtyMins, IntervalInMinsThreeMins}
-}
-
-// IPVersion enumerates the values for ip version.
-type IPVersion string
-
-const (
- // IPVersionIPv4 ...
- IPVersionIPv4 IPVersion = "IPv4"
- // IPVersionIPv6 ...
- IPVersionIPv6 IPVersion = "IPv6"
-)
-
-// PossibleIPVersionValues returns an array of possible values for the IPVersion const type.
-func PossibleIPVersionValues() []IPVersion {
- return []IPVersion{IPVersionIPv4, IPVersionIPv6}
-}
-
-// IPVersions enumerates the values for ip versions.
-type IPVersions string
-
-const (
- // IPVersionsIPv4 ...
- IPVersionsIPv4 IPVersions = "IPv4"
- // IPVersionsIPv6 ...
- IPVersionsIPv6 IPVersions = "IPv6"
-)
-
-// PossibleIPVersionsValues returns an array of possible values for the IPVersions const type.
-func PossibleIPVersionsValues() []IPVersions {
- return []IPVersions{IPVersionsIPv4, IPVersionsIPv6}
-}
-
-// LinuxPatchAssessmentMode enumerates the values for linux patch assessment mode.
-type LinuxPatchAssessmentMode string
-
-const (
- // LinuxPatchAssessmentModeAutomaticByPlatform ...
- LinuxPatchAssessmentModeAutomaticByPlatform LinuxPatchAssessmentMode = "AutomaticByPlatform"
- // LinuxPatchAssessmentModeImageDefault ...
- LinuxPatchAssessmentModeImageDefault LinuxPatchAssessmentMode = "ImageDefault"
-)
-
-// PossibleLinuxPatchAssessmentModeValues returns an array of possible values for the LinuxPatchAssessmentMode const type.
-func PossibleLinuxPatchAssessmentModeValues() []LinuxPatchAssessmentMode {
- return []LinuxPatchAssessmentMode{LinuxPatchAssessmentModeAutomaticByPlatform, LinuxPatchAssessmentModeImageDefault}
-}
-
-// LinuxVMGuestPatchMode enumerates the values for linux vm guest patch mode.
-type LinuxVMGuestPatchMode string
-
-const (
- // LinuxVMGuestPatchModeAutomaticByPlatform ...
- LinuxVMGuestPatchModeAutomaticByPlatform LinuxVMGuestPatchMode = "AutomaticByPlatform"
- // LinuxVMGuestPatchModeImageDefault ...
- LinuxVMGuestPatchModeImageDefault LinuxVMGuestPatchMode = "ImageDefault"
-)
-
-// PossibleLinuxVMGuestPatchModeValues returns an array of possible values for the LinuxVMGuestPatchMode const type.
-func PossibleLinuxVMGuestPatchModeValues() []LinuxVMGuestPatchMode {
- return []LinuxVMGuestPatchMode{LinuxVMGuestPatchModeAutomaticByPlatform, LinuxVMGuestPatchModeImageDefault}
-}
-
-// MaintenanceOperationResultCodeTypes enumerates the values for maintenance operation result code types.
-type MaintenanceOperationResultCodeTypes string
-
-const (
- // MaintenanceOperationResultCodeTypesMaintenanceAborted ...
- MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted"
- // MaintenanceOperationResultCodeTypesMaintenanceCompleted ...
- MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted"
- // MaintenanceOperationResultCodeTypesNone ...
- MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None"
- // MaintenanceOperationResultCodeTypesRetryLater ...
- MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater"
-)
-
-// PossibleMaintenanceOperationResultCodeTypesValues returns an array of possible values for the MaintenanceOperationResultCodeTypes const type.
-func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationResultCodeTypes {
- return []MaintenanceOperationResultCodeTypes{MaintenanceOperationResultCodeTypesMaintenanceAborted, MaintenanceOperationResultCodeTypesMaintenanceCompleted, MaintenanceOperationResultCodeTypesNone, MaintenanceOperationResultCodeTypesRetryLater}
-}
-
-// NetworkAccessPolicy enumerates the values for network access policy.
-type NetworkAccessPolicy string
-
-const (
- // NetworkAccessPolicyAllowAll The disk can be exported or uploaded to from any network.
- NetworkAccessPolicyAllowAll NetworkAccessPolicy = "AllowAll"
- // NetworkAccessPolicyAllowPrivate The disk can be exported or uploaded to using a DiskAccess resource's
- // private endpoints.
- NetworkAccessPolicyAllowPrivate NetworkAccessPolicy = "AllowPrivate"
- // NetworkAccessPolicyDenyAll The disk cannot be exported.
- NetworkAccessPolicyDenyAll NetworkAccessPolicy = "DenyAll"
-)
-
-// PossibleNetworkAccessPolicyValues returns an array of possible values for the NetworkAccessPolicy const type.
-func PossibleNetworkAccessPolicyValues() []NetworkAccessPolicy {
- return []NetworkAccessPolicy{NetworkAccessPolicyAllowAll, NetworkAccessPolicyAllowPrivate, NetworkAccessPolicyDenyAll}
-}
-
-// NetworkAPIVersion enumerates the values for network api version.
-type NetworkAPIVersion string
-
-const (
- // NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne ...
- NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne NetworkAPIVersion = "2020-11-01"
-)
-
-// PossibleNetworkAPIVersionValues returns an array of possible values for the NetworkAPIVersion const type.
-func PossibleNetworkAPIVersionValues() []NetworkAPIVersion {
- return []NetworkAPIVersion{NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne}
-}
-
-// OperatingSystemStateTypes enumerates the values for operating system state types.
-type OperatingSystemStateTypes string
-
-const (
- // OperatingSystemStateTypesGeneralized Generalized image. Needs to be provisioned during deployment time.
- OperatingSystemStateTypesGeneralized OperatingSystemStateTypes = "Generalized"
- // OperatingSystemStateTypesSpecialized Specialized image. Contains already provisioned OS Disk.
- OperatingSystemStateTypesSpecialized OperatingSystemStateTypes = "Specialized"
-)
-
-// PossibleOperatingSystemStateTypesValues returns an array of possible values for the OperatingSystemStateTypes const type.
-func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes {
- return []OperatingSystemStateTypes{OperatingSystemStateTypesGeneralized, OperatingSystemStateTypesSpecialized}
-}
-
-// OperatingSystemType enumerates the values for operating system type.
-type OperatingSystemType string
-
-const (
- // OperatingSystemTypeLinux ...
- OperatingSystemTypeLinux OperatingSystemType = "Linux"
- // OperatingSystemTypeWindows ...
- OperatingSystemTypeWindows OperatingSystemType = "Windows"
-)
-
-// PossibleOperatingSystemTypeValues returns an array of possible values for the OperatingSystemType const type.
-func PossibleOperatingSystemTypeValues() []OperatingSystemType {
- return []OperatingSystemType{OperatingSystemTypeLinux, OperatingSystemTypeWindows}
-}
-
-// OperatingSystemTypes enumerates the values for operating system types.
-type OperatingSystemTypes string
-
-const (
- // OperatingSystemTypesLinux ...
- OperatingSystemTypesLinux OperatingSystemTypes = "Linux"
- // OperatingSystemTypesWindows ...
- OperatingSystemTypesWindows OperatingSystemTypes = "Windows"
-)
-
-// PossibleOperatingSystemTypesValues returns an array of possible values for the OperatingSystemTypes const type.
-func PossibleOperatingSystemTypesValues() []OperatingSystemTypes {
- return []OperatingSystemTypes{OperatingSystemTypesLinux, OperatingSystemTypesWindows}
-}
-
-// OrchestrationMode enumerates the values for orchestration mode.
-type OrchestrationMode string
-
-const (
- // OrchestrationModeFlexible ...
- OrchestrationModeFlexible OrchestrationMode = "Flexible"
- // OrchestrationModeUniform ...
- OrchestrationModeUniform OrchestrationMode = "Uniform"
-)
-
-// PossibleOrchestrationModeValues returns an array of possible values for the OrchestrationMode const type.
-func PossibleOrchestrationModeValues() []OrchestrationMode {
- return []OrchestrationMode{OrchestrationModeFlexible, OrchestrationModeUniform}
-}
-
-// OrchestrationServiceNames enumerates the values for orchestration service names.
-type OrchestrationServiceNames string
-
-const (
- // OrchestrationServiceNamesAutomaticRepairs ...
- OrchestrationServiceNamesAutomaticRepairs OrchestrationServiceNames = "AutomaticRepairs"
-)
-
-// PossibleOrchestrationServiceNamesValues returns an array of possible values for the OrchestrationServiceNames const type.
-func PossibleOrchestrationServiceNamesValues() []OrchestrationServiceNames {
- return []OrchestrationServiceNames{OrchestrationServiceNamesAutomaticRepairs}
-}
-
-// OrchestrationServiceState enumerates the values for orchestration service state.
-type OrchestrationServiceState string
-
-const (
- // OrchestrationServiceStateNotRunning ...
- OrchestrationServiceStateNotRunning OrchestrationServiceState = "NotRunning"
- // OrchestrationServiceStateRunning ...
- OrchestrationServiceStateRunning OrchestrationServiceState = "Running"
- // OrchestrationServiceStateSuspended ...
- OrchestrationServiceStateSuspended OrchestrationServiceState = "Suspended"
-)
-
-// PossibleOrchestrationServiceStateValues returns an array of possible values for the OrchestrationServiceState const type.
-func PossibleOrchestrationServiceStateValues() []OrchestrationServiceState {
- return []OrchestrationServiceState{OrchestrationServiceStateNotRunning, OrchestrationServiceStateRunning, OrchestrationServiceStateSuspended}
-}
-
-// OrchestrationServiceStateAction enumerates the values for orchestration service state action.
-type OrchestrationServiceStateAction string
-
-const (
- // OrchestrationServiceStateActionResume ...
- OrchestrationServiceStateActionResume OrchestrationServiceStateAction = "Resume"
- // OrchestrationServiceStateActionSuspend ...
- OrchestrationServiceStateActionSuspend OrchestrationServiceStateAction = "Suspend"
-)
-
-// PossibleOrchestrationServiceStateActionValues returns an array of possible values for the OrchestrationServiceStateAction const type.
-func PossibleOrchestrationServiceStateActionValues() []OrchestrationServiceStateAction {
- return []OrchestrationServiceStateAction{OrchestrationServiceStateActionResume, OrchestrationServiceStateActionSuspend}
-}
-
-// PassNames enumerates the values for pass names.
-type PassNames string
-
-const (
- // PassNamesOobeSystem ...
- PassNamesOobeSystem PassNames = "OobeSystem"
-)
-
-// PossiblePassNamesValues returns an array of possible values for the PassNames const type.
-func PossiblePassNamesValues() []PassNames {
- return []PassNames{PassNamesOobeSystem}
-}
-
-// PatchAssessmentState enumerates the values for patch assessment state.
-type PatchAssessmentState string
-
-const (
- // PatchAssessmentStateAvailable ...
- PatchAssessmentStateAvailable PatchAssessmentState = "Available"
- // PatchAssessmentStateUnknown ...
- PatchAssessmentStateUnknown PatchAssessmentState = "Unknown"
-)
-
-// PossiblePatchAssessmentStateValues returns an array of possible values for the PatchAssessmentState const type.
-func PossiblePatchAssessmentStateValues() []PatchAssessmentState {
- return []PatchAssessmentState{PatchAssessmentStateAvailable, PatchAssessmentStateUnknown}
-}
-
-// PatchInstallationState enumerates the values for patch installation state.
-type PatchInstallationState string
-
-const (
- // PatchInstallationStateExcluded ...
- PatchInstallationStateExcluded PatchInstallationState = "Excluded"
- // PatchInstallationStateFailed ...
- PatchInstallationStateFailed PatchInstallationState = "Failed"
- // PatchInstallationStateInstalled ...
- PatchInstallationStateInstalled PatchInstallationState = "Installed"
- // PatchInstallationStateNotSelected ...
- PatchInstallationStateNotSelected PatchInstallationState = "NotSelected"
- // PatchInstallationStatePending ...
- PatchInstallationStatePending PatchInstallationState = "Pending"
- // PatchInstallationStateUnknown ...
- PatchInstallationStateUnknown PatchInstallationState = "Unknown"
-)
-
-// PossiblePatchInstallationStateValues returns an array of possible values for the PatchInstallationState const type.
-func PossiblePatchInstallationStateValues() []PatchInstallationState {
- return []PatchInstallationState{PatchInstallationStateExcluded, PatchInstallationStateFailed, PatchInstallationStateInstalled, PatchInstallationStateNotSelected, PatchInstallationStatePending, PatchInstallationStateUnknown}
-}
-
-// PatchOperationStatus enumerates the values for patch operation status.
-type PatchOperationStatus string
-
-const (
- // PatchOperationStatusCompletedWithWarnings ...
- PatchOperationStatusCompletedWithWarnings PatchOperationStatus = "CompletedWithWarnings"
- // PatchOperationStatusFailed ...
- PatchOperationStatusFailed PatchOperationStatus = "Failed"
- // PatchOperationStatusInProgress ...
- PatchOperationStatusInProgress PatchOperationStatus = "InProgress"
- // PatchOperationStatusSucceeded ...
- PatchOperationStatusSucceeded PatchOperationStatus = "Succeeded"
- // PatchOperationStatusUnknown ...
- PatchOperationStatusUnknown PatchOperationStatus = "Unknown"
-)
-
-// PossiblePatchOperationStatusValues returns an array of possible values for the PatchOperationStatus const type.
-func PossiblePatchOperationStatusValues() []PatchOperationStatus {
- return []PatchOperationStatus{PatchOperationStatusCompletedWithWarnings, PatchOperationStatusFailed, PatchOperationStatusInProgress, PatchOperationStatusSucceeded, PatchOperationStatusUnknown}
-}
-
-// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection
-// provisioning state.
-type PrivateEndpointConnectionProvisioningState string
-
-const (
- // PrivateEndpointConnectionProvisioningStateCreating ...
- PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating"
- // PrivateEndpointConnectionProvisioningStateDeleting ...
- PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting"
- // PrivateEndpointConnectionProvisioningStateFailed ...
- PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed"
- // PrivateEndpointConnectionProvisioningStateSucceeded ...
- PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded"
-)
-
-// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type.
-func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
- return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded}
-}
-
-// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status.
-type PrivateEndpointServiceConnectionStatus string
-
-const (
- // PrivateEndpointServiceConnectionStatusApproved ...
- PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved"
- // PrivateEndpointServiceConnectionStatusPending ...
- PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending"
- // PrivateEndpointServiceConnectionStatusRejected ...
- PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected"
-)
-
-// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type.
-func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
- return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected}
-}
-
-// ProtocolTypes enumerates the values for protocol types.
-type ProtocolTypes string
-
-const (
- // ProtocolTypesHTTP ...
- ProtocolTypesHTTP ProtocolTypes = "Http"
- // ProtocolTypesHTTPS ...
- ProtocolTypesHTTPS ProtocolTypes = "Https"
-)
-
-// PossibleProtocolTypesValues returns an array of possible values for the ProtocolTypes const type.
-func PossibleProtocolTypesValues() []ProtocolTypes {
- return []ProtocolTypes{ProtocolTypesHTTP, ProtocolTypesHTTPS}
-}
-
-// ProvisioningState enumerates the values for provisioning state.
-type ProvisioningState string
-
-const (
- // ProvisioningStateCreating ...
- ProvisioningStateCreating ProvisioningState = "Creating"
- // ProvisioningStateDeleting ...
- ProvisioningStateDeleting ProvisioningState = "Deleting"
- // ProvisioningStateFailed ...
- ProvisioningStateFailed ProvisioningState = "Failed"
- // ProvisioningStateMigrating ...
- ProvisioningStateMigrating ProvisioningState = "Migrating"
- // ProvisioningStateSucceeded ...
- ProvisioningStateSucceeded ProvisioningState = "Succeeded"
- // ProvisioningStateUpdating ...
- ProvisioningStateUpdating ProvisioningState = "Updating"
-)
-
-// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
-func PossibleProvisioningStateValues() []ProvisioningState {
- return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateMigrating, ProvisioningStateSucceeded, ProvisioningStateUpdating}
-}
-
-// ProvisioningState1 enumerates the values for provisioning state 1.
-type ProvisioningState1 string
-
-const (
- // ProvisioningState1Creating ...
- ProvisioningState1Creating ProvisioningState1 = "Creating"
- // ProvisioningState1Deleting ...
- ProvisioningState1Deleting ProvisioningState1 = "Deleting"
- // ProvisioningState1Failed ...
- ProvisioningState1Failed ProvisioningState1 = "Failed"
- // ProvisioningState1Migrating ...
- ProvisioningState1Migrating ProvisioningState1 = "Migrating"
- // ProvisioningState1Succeeded ...
- ProvisioningState1Succeeded ProvisioningState1 = "Succeeded"
- // ProvisioningState1Updating ...
- ProvisioningState1Updating ProvisioningState1 = "Updating"
-)
-
-// PossibleProvisioningState1Values returns an array of possible values for the ProvisioningState1 const type.
-func PossibleProvisioningState1Values() []ProvisioningState1 {
- return []ProvisioningState1{ProvisioningState1Creating, ProvisioningState1Deleting, ProvisioningState1Failed, ProvisioningState1Migrating, ProvisioningState1Succeeded, ProvisioningState1Updating}
-}
-
-// ProvisioningState2 enumerates the values for provisioning state 2.
-type ProvisioningState2 string
-
-const (
- // ProvisioningState2Creating ...
- ProvisioningState2Creating ProvisioningState2 = "Creating"
- // ProvisioningState2Deleting ...
- ProvisioningState2Deleting ProvisioningState2 = "Deleting"
- // ProvisioningState2Failed ...
- ProvisioningState2Failed ProvisioningState2 = "Failed"
- // ProvisioningState2Migrating ...
- ProvisioningState2Migrating ProvisioningState2 = "Migrating"
- // ProvisioningState2Succeeded ...
- ProvisioningState2Succeeded ProvisioningState2 = "Succeeded"
- // ProvisioningState2Updating ...
- ProvisioningState2Updating ProvisioningState2 = "Updating"
-)
-
-// PossibleProvisioningState2Values returns an array of possible values for the ProvisioningState2 const type.
-func PossibleProvisioningState2Values() []ProvisioningState2 {
- return []ProvisioningState2{ProvisioningState2Creating, ProvisioningState2Deleting, ProvisioningState2Failed, ProvisioningState2Migrating, ProvisioningState2Succeeded, ProvisioningState2Updating}
-}
-
-// ProvisioningState3 enumerates the values for provisioning state 3.
-type ProvisioningState3 string
-
-const (
- // ProvisioningState3Creating ...
- ProvisioningState3Creating ProvisioningState3 = "Creating"
- // ProvisioningState3Deleting ...
- ProvisioningState3Deleting ProvisioningState3 = "Deleting"
- // ProvisioningState3Failed ...
- ProvisioningState3Failed ProvisioningState3 = "Failed"
- // ProvisioningState3Migrating ...
- ProvisioningState3Migrating ProvisioningState3 = "Migrating"
- // ProvisioningState3Succeeded ...
- ProvisioningState3Succeeded ProvisioningState3 = "Succeeded"
- // ProvisioningState3Updating ...
- ProvisioningState3Updating ProvisioningState3 = "Updating"
-)
-
-// PossibleProvisioningState3Values returns an array of possible values for the ProvisioningState3 const type.
-func PossibleProvisioningState3Values() []ProvisioningState3 {
- return []ProvisioningState3{ProvisioningState3Creating, ProvisioningState3Deleting, ProvisioningState3Failed, ProvisioningState3Migrating, ProvisioningState3Succeeded, ProvisioningState3Updating}
-}
-
-// ProximityPlacementGroupType enumerates the values for proximity placement group type.
-type ProximityPlacementGroupType string
-
-const (
- // ProximityPlacementGroupTypeStandard ...
- ProximityPlacementGroupTypeStandard ProximityPlacementGroupType = "Standard"
- // ProximityPlacementGroupTypeUltra ...
- ProximityPlacementGroupTypeUltra ProximityPlacementGroupType = "Ultra"
-)
-
-// PossibleProximityPlacementGroupTypeValues returns an array of possible values for the ProximityPlacementGroupType const type.
-func PossibleProximityPlacementGroupTypeValues() []ProximityPlacementGroupType {
- return []ProximityPlacementGroupType{ProximityPlacementGroupTypeStandard, ProximityPlacementGroupTypeUltra}
-}
-
-// PublicIPAddressSkuName enumerates the values for public ip address sku name.
-type PublicIPAddressSkuName string
-
-const (
- // PublicIPAddressSkuNameBasic ...
- PublicIPAddressSkuNameBasic PublicIPAddressSkuName = "Basic"
- // PublicIPAddressSkuNameStandard ...
- PublicIPAddressSkuNameStandard PublicIPAddressSkuName = "Standard"
-)
-
-// PossiblePublicIPAddressSkuNameValues returns an array of possible values for the PublicIPAddressSkuName const type.
-func PossiblePublicIPAddressSkuNameValues() []PublicIPAddressSkuName {
- return []PublicIPAddressSkuName{PublicIPAddressSkuNameBasic, PublicIPAddressSkuNameStandard}
-}
-
-// PublicIPAddressSkuTier enumerates the values for public ip address sku tier.
-type PublicIPAddressSkuTier string
-
-const (
- // PublicIPAddressSkuTierGlobal ...
- PublicIPAddressSkuTierGlobal PublicIPAddressSkuTier = "Global"
- // PublicIPAddressSkuTierRegional ...
- PublicIPAddressSkuTierRegional PublicIPAddressSkuTier = "Regional"
-)
-
-// PossiblePublicIPAddressSkuTierValues returns an array of possible values for the PublicIPAddressSkuTier const type.
-func PossiblePublicIPAddressSkuTierValues() []PublicIPAddressSkuTier {
- return []PublicIPAddressSkuTier{PublicIPAddressSkuTierGlobal, PublicIPAddressSkuTierRegional}
-}
-
-// PublicIPAllocationMethod enumerates the values for public ip allocation method.
-type PublicIPAllocationMethod string
-
-const (
- // PublicIPAllocationMethodDynamic ...
- PublicIPAllocationMethodDynamic PublicIPAllocationMethod = "Dynamic"
- // PublicIPAllocationMethodStatic ...
- PublicIPAllocationMethodStatic PublicIPAllocationMethod = "Static"
-)
-
-// PossiblePublicIPAllocationMethodValues returns an array of possible values for the PublicIPAllocationMethod const type.
-func PossiblePublicIPAllocationMethodValues() []PublicIPAllocationMethod {
- return []PublicIPAllocationMethod{PublicIPAllocationMethodDynamic, PublicIPAllocationMethodStatic}
-}
-
-// PublicNetworkAccess enumerates the values for public network access.
-type PublicNetworkAccess string
-
-const (
- // PublicNetworkAccessDisabled You cannot access the underlying data of the disk publicly on the internet
- // even when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your
- // trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.
- PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled"
- // PublicNetworkAccessEnabled You can generate a SAS URI to access the underlying data of the disk publicly
- // on the internet when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI
- // only from your trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.
- PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled"
-)
-
-// PossiblePublicNetworkAccessValues returns an array of possible values for the PublicNetworkAccess const type.
-func PossiblePublicNetworkAccessValues() []PublicNetworkAccess {
- return []PublicNetworkAccess{PublicNetworkAccessDisabled, PublicNetworkAccessEnabled}
-}
-
-// ReplicationMode enumerates the values for replication mode.
-type ReplicationMode string
-
-const (
- // ReplicationModeFull ...
- ReplicationModeFull ReplicationMode = "Full"
- // ReplicationModeShallow ...
- ReplicationModeShallow ReplicationMode = "Shallow"
-)
-
-// PossibleReplicationModeValues returns an array of possible values for the ReplicationMode const type.
-func PossibleReplicationModeValues() []ReplicationMode {
- return []ReplicationMode{ReplicationModeFull, ReplicationModeShallow}
-}
-
-// ReplicationState enumerates the values for replication state.
-type ReplicationState string
-
-const (
- // ReplicationStateCompleted ...
- ReplicationStateCompleted ReplicationState = "Completed"
- // ReplicationStateFailed ...
- ReplicationStateFailed ReplicationState = "Failed"
- // ReplicationStateReplicating ...
- ReplicationStateReplicating ReplicationState = "Replicating"
- // ReplicationStateUnknown ...
- ReplicationStateUnknown ReplicationState = "Unknown"
-)
-
-// PossibleReplicationStateValues returns an array of possible values for the ReplicationState const type.
-func PossibleReplicationStateValues() []ReplicationState {
- return []ReplicationState{ReplicationStateCompleted, ReplicationStateFailed, ReplicationStateReplicating, ReplicationStateUnknown}
-}
-
-// ReplicationStatusTypes enumerates the values for replication status types.
-type ReplicationStatusTypes string
-
-const (
- // ReplicationStatusTypesReplicationStatus ...
- ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = "ReplicationStatus"
-)
-
-// PossibleReplicationStatusTypesValues returns an array of possible values for the ReplicationStatusTypes const type.
-func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes {
- return []ReplicationStatusTypes{ReplicationStatusTypesReplicationStatus}
-}
-
-// ResourceIdentityType enumerates the values for resource identity type.
-type ResourceIdentityType string
-
-const (
- // ResourceIdentityTypeNone ...
- ResourceIdentityTypeNone ResourceIdentityType = "None"
- // ResourceIdentityTypeSystemAssigned ...
- ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned"
- // ResourceIdentityTypeSystemAssignedUserAssigned ...
- ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned"
- // ResourceIdentityTypeUserAssigned ...
- ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned"
-)
-
-// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type.
-func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
- return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned}
-}
-
-// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type.
-type ResourceSkuCapacityScaleType string
-
-const (
- // ResourceSkuCapacityScaleTypeAutomatic ...
- ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic"
- // ResourceSkuCapacityScaleTypeManual ...
- ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual"
- // ResourceSkuCapacityScaleTypeNone ...
- ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None"
-)
-
-// PossibleResourceSkuCapacityScaleTypeValues returns an array of possible values for the ResourceSkuCapacityScaleType const type.
-func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType {
- return []ResourceSkuCapacityScaleType{ResourceSkuCapacityScaleTypeAutomatic, ResourceSkuCapacityScaleTypeManual, ResourceSkuCapacityScaleTypeNone}
-}
-
-// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code.
-type ResourceSkuRestrictionsReasonCode string
-
-const (
- // ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ...
- ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription"
- // ResourceSkuRestrictionsReasonCodeQuotaID ...
- ResourceSkuRestrictionsReasonCodeQuotaID ResourceSkuRestrictionsReasonCode = "QuotaId"
-)
-
-// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type.
-func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode {
- return []ResourceSkuRestrictionsReasonCode{ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription, ResourceSkuRestrictionsReasonCodeQuotaID}
-}
-
-// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type.
-type ResourceSkuRestrictionsType string
-
-const (
- // ResourceSkuRestrictionsTypeLocation ...
- ResourceSkuRestrictionsTypeLocation ResourceSkuRestrictionsType = "Location"
- // ResourceSkuRestrictionsTypeZone ...
- ResourceSkuRestrictionsTypeZone ResourceSkuRestrictionsType = "Zone"
-)
-
-// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type.
-func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType {
- return []ResourceSkuRestrictionsType{ResourceSkuRestrictionsTypeLocation, ResourceSkuRestrictionsTypeZone}
-}
-
-// RestorePointCollectionExpandOptions enumerates the values for restore point collection expand options.
-type RestorePointCollectionExpandOptions string
-
-const (
- // RestorePointCollectionExpandOptionsRestorePoints ...
- RestorePointCollectionExpandOptionsRestorePoints RestorePointCollectionExpandOptions = "restorePoints"
-)
-
-// PossibleRestorePointCollectionExpandOptionsValues returns an array of possible values for the RestorePointCollectionExpandOptions const type.
-func PossibleRestorePointCollectionExpandOptionsValues() []RestorePointCollectionExpandOptions {
- return []RestorePointCollectionExpandOptions{RestorePointCollectionExpandOptionsRestorePoints}
-}
-
-// RollingUpgradeActionType enumerates the values for rolling upgrade action type.
-type RollingUpgradeActionType string
-
-const (
- // RollingUpgradeActionTypeCancel ...
- RollingUpgradeActionTypeCancel RollingUpgradeActionType = "Cancel"
- // RollingUpgradeActionTypeStart ...
- RollingUpgradeActionTypeStart RollingUpgradeActionType = "Start"
-)
-
-// PossibleRollingUpgradeActionTypeValues returns an array of possible values for the RollingUpgradeActionType const type.
-func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType {
- return []RollingUpgradeActionType{RollingUpgradeActionTypeCancel, RollingUpgradeActionTypeStart}
-}
-
-// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code.
-type RollingUpgradeStatusCode string
-
-const (
- // RollingUpgradeStatusCodeCancelled ...
- RollingUpgradeStatusCodeCancelled RollingUpgradeStatusCode = "Cancelled"
- // RollingUpgradeStatusCodeCompleted ...
- RollingUpgradeStatusCodeCompleted RollingUpgradeStatusCode = "Completed"
- // RollingUpgradeStatusCodeFaulted ...
- RollingUpgradeStatusCodeFaulted RollingUpgradeStatusCode = "Faulted"
- // RollingUpgradeStatusCodeRollingForward ...
- RollingUpgradeStatusCodeRollingForward RollingUpgradeStatusCode = "RollingForward"
-)
-
-// PossibleRollingUpgradeStatusCodeValues returns an array of possible values for the RollingUpgradeStatusCode const type.
-func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode {
- return []RollingUpgradeStatusCode{RollingUpgradeStatusCodeCancelled, RollingUpgradeStatusCodeCompleted, RollingUpgradeStatusCodeFaulted, RollingUpgradeStatusCodeRollingForward}
-}
-
-// SecurityTypes enumerates the values for security types.
-type SecurityTypes string
-
-const (
- // SecurityTypesTrustedLaunch ...
- SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch"
-)
-
-// PossibleSecurityTypesValues returns an array of possible values for the SecurityTypes const type.
-func PossibleSecurityTypesValues() []SecurityTypes {
- return []SecurityTypes{SecurityTypesTrustedLaunch}
-}
-
-// SelectPermissions enumerates the values for select permissions.
-type SelectPermissions string
-
-const (
- // SelectPermissionsPermissions ...
- SelectPermissionsPermissions SelectPermissions = "Permissions"
-)
-
-// PossibleSelectPermissionsValues returns an array of possible values for the SelectPermissions const type.
-func PossibleSelectPermissionsValues() []SelectPermissions {
- return []SelectPermissions{SelectPermissionsPermissions}
-}
-
-// SettingNames enumerates the values for setting names.
-type SettingNames string
-
-const (
- // SettingNamesAutoLogon ...
- SettingNamesAutoLogon SettingNames = "AutoLogon"
- // SettingNamesFirstLogonCommands ...
- SettingNamesFirstLogonCommands SettingNames = "FirstLogonCommands"
-)
-
-// PossibleSettingNamesValues returns an array of possible values for the SettingNames const type.
-func PossibleSettingNamesValues() []SettingNames {
- return []SettingNames{SettingNamesAutoLogon, SettingNamesFirstLogonCommands}
-}
-
-// SharedToValues enumerates the values for shared to values.
-type SharedToValues string
-
-const (
- // SharedToValuesTenant ...
- SharedToValuesTenant SharedToValues = "tenant"
-)
-
-// PossibleSharedToValuesValues returns an array of possible values for the SharedToValues const type.
-func PossibleSharedToValuesValues() []SharedToValues {
- return []SharedToValues{SharedToValuesTenant}
-}
-
-// SharingProfileGroupTypes enumerates the values for sharing profile group types.
-type SharingProfileGroupTypes string
-
-const (
- // SharingProfileGroupTypesAADTenants ...
- SharingProfileGroupTypesAADTenants SharingProfileGroupTypes = "AADTenants"
- // SharingProfileGroupTypesSubscriptions ...
- SharingProfileGroupTypesSubscriptions SharingProfileGroupTypes = "Subscriptions"
-)
-
-// PossibleSharingProfileGroupTypesValues returns an array of possible values for the SharingProfileGroupTypes const type.
-func PossibleSharingProfileGroupTypesValues() []SharingProfileGroupTypes {
- return []SharingProfileGroupTypes{SharingProfileGroupTypesAADTenants, SharingProfileGroupTypesSubscriptions}
-}
-
-// SharingUpdateOperationTypes enumerates the values for sharing update operation types.
-type SharingUpdateOperationTypes string
-
-const (
- // SharingUpdateOperationTypesAdd ...
- SharingUpdateOperationTypesAdd SharingUpdateOperationTypes = "Add"
- // SharingUpdateOperationTypesRemove ...
- SharingUpdateOperationTypesRemove SharingUpdateOperationTypes = "Remove"
- // SharingUpdateOperationTypesReset ...
- SharingUpdateOperationTypesReset SharingUpdateOperationTypes = "Reset"
-)
-
-// PossibleSharingUpdateOperationTypesValues returns an array of possible values for the SharingUpdateOperationTypes const type.
-func PossibleSharingUpdateOperationTypesValues() []SharingUpdateOperationTypes {
- return []SharingUpdateOperationTypes{SharingUpdateOperationTypesAdd, SharingUpdateOperationTypesRemove, SharingUpdateOperationTypesReset}
-}
-
-// SnapshotStorageAccountTypes enumerates the values for snapshot storage account types.
-type SnapshotStorageAccountTypes string
-
-const (
- // SnapshotStorageAccountTypesPremiumLRS Premium SSD locally redundant storage
- SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS"
- // SnapshotStorageAccountTypesStandardLRS Standard HDD locally redundant storage
- SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS"
- // SnapshotStorageAccountTypesStandardZRS Standard zone redundant storage
- SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS"
-)
-
-// PossibleSnapshotStorageAccountTypesValues returns an array of possible values for the SnapshotStorageAccountTypes const type.
-func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes {
- return []SnapshotStorageAccountTypes{SnapshotStorageAccountTypesPremiumLRS, SnapshotStorageAccountTypesStandardLRS, SnapshotStorageAccountTypesStandardZRS}
-}
-
-// StatusLevelTypes enumerates the values for status level types.
-type StatusLevelTypes string
-
-const (
- // StatusLevelTypesError ...
- StatusLevelTypesError StatusLevelTypes = "Error"
- // StatusLevelTypesInfo ...
- StatusLevelTypesInfo StatusLevelTypes = "Info"
- // StatusLevelTypesWarning ...
- StatusLevelTypesWarning StatusLevelTypes = "Warning"
-)
-
-// PossibleStatusLevelTypesValues returns an array of possible values for the StatusLevelTypes const type.
-func PossibleStatusLevelTypesValues() []StatusLevelTypes {
- return []StatusLevelTypes{StatusLevelTypesError, StatusLevelTypesInfo, StatusLevelTypesWarning}
-}
-
-// StorageAccountType enumerates the values for storage account type.
-type StorageAccountType string
-
-const (
- // StorageAccountTypePremiumLRS ...
- StorageAccountTypePremiumLRS StorageAccountType = "Premium_LRS"
- // StorageAccountTypeStandardLRS ...
- StorageAccountTypeStandardLRS StorageAccountType = "Standard_LRS"
- // StorageAccountTypeStandardZRS ...
- StorageAccountTypeStandardZRS StorageAccountType = "Standard_ZRS"
-)
-
-// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type.
-func PossibleStorageAccountTypeValues() []StorageAccountType {
- return []StorageAccountType{StorageAccountTypePremiumLRS, StorageAccountTypeStandardLRS, StorageAccountTypeStandardZRS}
-}
-
-// StorageAccountTypes enumerates the values for storage account types.
-type StorageAccountTypes string
-
-const (
- // StorageAccountTypesPremiumLRS ...
- StorageAccountTypesPremiumLRS StorageAccountTypes = "Premium_LRS"
- // StorageAccountTypesPremiumZRS ...
- StorageAccountTypesPremiumZRS StorageAccountTypes = "Premium_ZRS"
- // StorageAccountTypesStandardLRS ...
- StorageAccountTypesStandardLRS StorageAccountTypes = "Standard_LRS"
- // StorageAccountTypesStandardSSDLRS ...
- StorageAccountTypesStandardSSDLRS StorageAccountTypes = "StandardSSD_LRS"
- // StorageAccountTypesStandardSSDZRS ...
- StorageAccountTypesStandardSSDZRS StorageAccountTypes = "StandardSSD_ZRS"
- // StorageAccountTypesUltraSSDLRS ...
- StorageAccountTypesUltraSSDLRS StorageAccountTypes = "UltraSSD_LRS"
-)
-
-// PossibleStorageAccountTypesValues returns an array of possible values for the StorageAccountTypes const type.
-func PossibleStorageAccountTypesValues() []StorageAccountTypes {
- return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesPremiumZRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS, StorageAccountTypesStandardSSDZRS, StorageAccountTypesUltraSSDLRS}
-}
-
-// UpgradeMode enumerates the values for upgrade mode.
-type UpgradeMode string
-
-const (
- // UpgradeModeAutomatic ...
- UpgradeModeAutomatic UpgradeMode = "Automatic"
- // UpgradeModeManual ...
- UpgradeModeManual UpgradeMode = "Manual"
- // UpgradeModeRolling ...
- UpgradeModeRolling UpgradeMode = "Rolling"
-)
-
-// PossibleUpgradeModeValues returns an array of possible values for the UpgradeMode const type.
-func PossibleUpgradeModeValues() []UpgradeMode {
- return []UpgradeMode{UpgradeModeAutomatic, UpgradeModeManual, UpgradeModeRolling}
-}
-
-// UpgradeOperationInvoker enumerates the values for upgrade operation invoker.
-type UpgradeOperationInvoker string
-
-const (
- // UpgradeOperationInvokerPlatform ...
- UpgradeOperationInvokerPlatform UpgradeOperationInvoker = "Platform"
- // UpgradeOperationInvokerUnknown ...
- UpgradeOperationInvokerUnknown UpgradeOperationInvoker = "Unknown"
- // UpgradeOperationInvokerUser ...
- UpgradeOperationInvokerUser UpgradeOperationInvoker = "User"
-)
-
-// PossibleUpgradeOperationInvokerValues returns an array of possible values for the UpgradeOperationInvoker const type.
-func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker {
- return []UpgradeOperationInvoker{UpgradeOperationInvokerPlatform, UpgradeOperationInvokerUnknown, UpgradeOperationInvokerUser}
-}
-
-// UpgradeState enumerates the values for upgrade state.
-type UpgradeState string
-
-const (
- // UpgradeStateCancelled ...
- UpgradeStateCancelled UpgradeState = "Cancelled"
- // UpgradeStateCompleted ...
- UpgradeStateCompleted UpgradeState = "Completed"
- // UpgradeStateFaulted ...
- UpgradeStateFaulted UpgradeState = "Faulted"
- // UpgradeStateRollingForward ...
- UpgradeStateRollingForward UpgradeState = "RollingForward"
-)
-
-// PossibleUpgradeStateValues returns an array of possible values for the UpgradeState const type.
-func PossibleUpgradeStateValues() []UpgradeState {
- return []UpgradeState{UpgradeStateCancelled, UpgradeStateCompleted, UpgradeStateFaulted, UpgradeStateRollingForward}
-}
-
-// VirtualMachineEvictionPolicyTypes enumerates the values for virtual machine eviction policy types.
-type VirtualMachineEvictionPolicyTypes string
-
-const (
- // VirtualMachineEvictionPolicyTypesDeallocate ...
- VirtualMachineEvictionPolicyTypesDeallocate VirtualMachineEvictionPolicyTypes = "Deallocate"
- // VirtualMachineEvictionPolicyTypesDelete ...
- VirtualMachineEvictionPolicyTypesDelete VirtualMachineEvictionPolicyTypes = "Delete"
-)
-
-// PossibleVirtualMachineEvictionPolicyTypesValues returns an array of possible values for the VirtualMachineEvictionPolicyTypes const type.
-func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionPolicyTypes {
- return []VirtualMachineEvictionPolicyTypes{VirtualMachineEvictionPolicyTypesDeallocate, VirtualMachineEvictionPolicyTypesDelete}
-}
-
-// VirtualMachinePriorityTypes enumerates the values for virtual machine priority types.
-type VirtualMachinePriorityTypes string
-
-const (
- // VirtualMachinePriorityTypesLow ...
- VirtualMachinePriorityTypesLow VirtualMachinePriorityTypes = "Low"
- // VirtualMachinePriorityTypesRegular ...
- VirtualMachinePriorityTypesRegular VirtualMachinePriorityTypes = "Regular"
- // VirtualMachinePriorityTypesSpot ...
- VirtualMachinePriorityTypesSpot VirtualMachinePriorityTypes = "Spot"
-)
-
-// PossibleVirtualMachinePriorityTypesValues returns an array of possible values for the VirtualMachinePriorityTypes const type.
-func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes {
- return []VirtualMachinePriorityTypes{VirtualMachinePriorityTypesLow, VirtualMachinePriorityTypesRegular, VirtualMachinePriorityTypesSpot}
-}
-
-// VirtualMachineScaleSetScaleInRules enumerates the values for virtual machine scale set scale in rules.
-type VirtualMachineScaleSetScaleInRules string
-
-const (
- // VirtualMachineScaleSetScaleInRulesDefault ...
- VirtualMachineScaleSetScaleInRulesDefault VirtualMachineScaleSetScaleInRules = "Default"
- // VirtualMachineScaleSetScaleInRulesNewestVM ...
- VirtualMachineScaleSetScaleInRulesNewestVM VirtualMachineScaleSetScaleInRules = "NewestVM"
- // VirtualMachineScaleSetScaleInRulesOldestVM ...
- VirtualMachineScaleSetScaleInRulesOldestVM VirtualMachineScaleSetScaleInRules = "OldestVM"
-)
-
-// PossibleVirtualMachineScaleSetScaleInRulesValues returns an array of possible values for the VirtualMachineScaleSetScaleInRules const type.
-func PossibleVirtualMachineScaleSetScaleInRulesValues() []VirtualMachineScaleSetScaleInRules {
- return []VirtualMachineScaleSetScaleInRules{VirtualMachineScaleSetScaleInRulesDefault, VirtualMachineScaleSetScaleInRulesNewestVM, VirtualMachineScaleSetScaleInRulesOldestVM}
-}
-
-// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type.
-type VirtualMachineScaleSetSkuScaleType string
-
-const (
- // VirtualMachineScaleSetSkuScaleTypeAutomatic ...
- VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic"
- // VirtualMachineScaleSetSkuScaleTypeNone ...
- VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None"
-)
-
-// PossibleVirtualMachineScaleSetSkuScaleTypeValues returns an array of possible values for the VirtualMachineScaleSetSkuScaleType const type.
-func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSetSkuScaleType {
- return []VirtualMachineScaleSetSkuScaleType{VirtualMachineScaleSetSkuScaleTypeAutomatic, VirtualMachineScaleSetSkuScaleTypeNone}
-}
-
-// VirtualMachineSizeTypes enumerates the values for virtual machine size types.
-type VirtualMachineSizeTypes string
-
-const (
- // VirtualMachineSizeTypesBasicA0 ...
- VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0"
- // VirtualMachineSizeTypesBasicA1 ...
- VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1"
- // VirtualMachineSizeTypesBasicA2 ...
- VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2"
- // VirtualMachineSizeTypesBasicA3 ...
- VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3"
- // VirtualMachineSizeTypesBasicA4 ...
- VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4"
- // VirtualMachineSizeTypesStandardA0 ...
- VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0"
- // VirtualMachineSizeTypesStandardA1 ...
- VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1"
- // VirtualMachineSizeTypesStandardA10 ...
- VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10"
- // VirtualMachineSizeTypesStandardA11 ...
- VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11"
- // VirtualMachineSizeTypesStandardA1V2 ...
- VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2"
- // VirtualMachineSizeTypesStandardA2 ...
- VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2"
- // VirtualMachineSizeTypesStandardA2mV2 ...
- VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2"
- // VirtualMachineSizeTypesStandardA2V2 ...
- VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2"
- // VirtualMachineSizeTypesStandardA3 ...
- VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3"
- // VirtualMachineSizeTypesStandardA4 ...
- VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4"
- // VirtualMachineSizeTypesStandardA4mV2 ...
- VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2"
- // VirtualMachineSizeTypesStandardA4V2 ...
- VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2"
- // VirtualMachineSizeTypesStandardA5 ...
- VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5"
- // VirtualMachineSizeTypesStandardA6 ...
- VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6"
- // VirtualMachineSizeTypesStandardA7 ...
- VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7"
- // VirtualMachineSizeTypesStandardA8 ...
- VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8"
- // VirtualMachineSizeTypesStandardA8mV2 ...
- VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2"
- // VirtualMachineSizeTypesStandardA8V2 ...
- VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2"
- // VirtualMachineSizeTypesStandardA9 ...
- VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9"
- // VirtualMachineSizeTypesStandardB1ms ...
- VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = "Standard_B1ms"
- // VirtualMachineSizeTypesStandardB1s ...
- VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = "Standard_B1s"
- // VirtualMachineSizeTypesStandardB2ms ...
- VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = "Standard_B2ms"
- // VirtualMachineSizeTypesStandardB2s ...
- VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = "Standard_B2s"
- // VirtualMachineSizeTypesStandardB4ms ...
- VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = "Standard_B4ms"
- // VirtualMachineSizeTypesStandardB8ms ...
- VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = "Standard_B8ms"
- // VirtualMachineSizeTypesStandardD1 ...
- VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1"
- // VirtualMachineSizeTypesStandardD11 ...
- VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11"
- // VirtualMachineSizeTypesStandardD11V2 ...
- VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2"
- // VirtualMachineSizeTypesStandardD12 ...
- VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12"
- // VirtualMachineSizeTypesStandardD12V2 ...
- VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2"
- // VirtualMachineSizeTypesStandardD13 ...
- VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13"
- // VirtualMachineSizeTypesStandardD13V2 ...
- VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2"
- // VirtualMachineSizeTypesStandardD14 ...
- VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14"
- // VirtualMachineSizeTypesStandardD14V2 ...
- VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2"
- // VirtualMachineSizeTypesStandardD15V2 ...
- VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2"
- // VirtualMachineSizeTypesStandardD16sV3 ...
- VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3"
- // VirtualMachineSizeTypesStandardD16V3 ...
- VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3"
- // VirtualMachineSizeTypesStandardD1V2 ...
- VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2"
- // VirtualMachineSizeTypesStandardD2 ...
- VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2"
- // VirtualMachineSizeTypesStandardD2sV3 ...
- VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3"
- // VirtualMachineSizeTypesStandardD2V2 ...
- VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2"
- // VirtualMachineSizeTypesStandardD2V3 ...
- VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3"
- // VirtualMachineSizeTypesStandardD3 ...
- VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3"
- // VirtualMachineSizeTypesStandardD32sV3 ...
- VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3"
- // VirtualMachineSizeTypesStandardD32V3 ...
- VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3"
- // VirtualMachineSizeTypesStandardD3V2 ...
- VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2"
- // VirtualMachineSizeTypesStandardD4 ...
- VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4"
- // VirtualMachineSizeTypesStandardD4sV3 ...
- VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3"
- // VirtualMachineSizeTypesStandardD4V2 ...
- VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2"
- // VirtualMachineSizeTypesStandardD4V3 ...
- VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3"
- // VirtualMachineSizeTypesStandardD5V2 ...
- VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2"
- // VirtualMachineSizeTypesStandardD64sV3 ...
- VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3"
- // VirtualMachineSizeTypesStandardD64V3 ...
- VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3"
- // VirtualMachineSizeTypesStandardD8sV3 ...
- VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3"
- // VirtualMachineSizeTypesStandardD8V3 ...
- VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3"
- // VirtualMachineSizeTypesStandardDS1 ...
- VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1"
- // VirtualMachineSizeTypesStandardDS11 ...
- VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11"
- // VirtualMachineSizeTypesStandardDS11V2 ...
- VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2"
- // VirtualMachineSizeTypesStandardDS12 ...
- VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12"
- // VirtualMachineSizeTypesStandardDS12V2 ...
- VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2"
- // VirtualMachineSizeTypesStandardDS13 ...
- VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13"
- // VirtualMachineSizeTypesStandardDS132V2 ...
- VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2"
- // VirtualMachineSizeTypesStandardDS134V2 ...
- VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2"
- // VirtualMachineSizeTypesStandardDS13V2 ...
- VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2"
- // VirtualMachineSizeTypesStandardDS14 ...
- VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14"
- // VirtualMachineSizeTypesStandardDS144V2 ...
- VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2"
- // VirtualMachineSizeTypesStandardDS148V2 ...
- VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2"
- // VirtualMachineSizeTypesStandardDS14V2 ...
- VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2"
- // VirtualMachineSizeTypesStandardDS15V2 ...
- VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2"
- // VirtualMachineSizeTypesStandardDS1V2 ...
- VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2"
- // VirtualMachineSizeTypesStandardDS2 ...
- VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2"
- // VirtualMachineSizeTypesStandardDS2V2 ...
- VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2"
- // VirtualMachineSizeTypesStandardDS3 ...
- VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3"
- // VirtualMachineSizeTypesStandardDS3V2 ...
- VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2"
- // VirtualMachineSizeTypesStandardDS4 ...
- VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4"
- // VirtualMachineSizeTypesStandardDS4V2 ...
- VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2"
- // VirtualMachineSizeTypesStandardDS5V2 ...
- VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2"
- // VirtualMachineSizeTypesStandardE16sV3 ...
- VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3"
- // VirtualMachineSizeTypesStandardE16V3 ...
- VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3"
- // VirtualMachineSizeTypesStandardE2sV3 ...
- VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3"
- // VirtualMachineSizeTypesStandardE2V3 ...
- VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3"
- // VirtualMachineSizeTypesStandardE3216V3 ...
- VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3"
- // VirtualMachineSizeTypesStandardE328sV3 ...
- VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3"
- // VirtualMachineSizeTypesStandardE32sV3 ...
- VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3"
- // VirtualMachineSizeTypesStandardE32V3 ...
- VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3"
- // VirtualMachineSizeTypesStandardE4sV3 ...
- VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3"
- // VirtualMachineSizeTypesStandardE4V3 ...
- VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3"
- // VirtualMachineSizeTypesStandardE6416sV3 ...
- VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3"
- // VirtualMachineSizeTypesStandardE6432sV3 ...
- VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3"
- // VirtualMachineSizeTypesStandardE64sV3 ...
- VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3"
- // VirtualMachineSizeTypesStandardE64V3 ...
- VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3"
- // VirtualMachineSizeTypesStandardE8sV3 ...
- VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3"
- // VirtualMachineSizeTypesStandardE8V3 ...
- VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3"
- // VirtualMachineSizeTypesStandardF1 ...
- VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1"
- // VirtualMachineSizeTypesStandardF16 ...
- VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16"
- // VirtualMachineSizeTypesStandardF16s ...
- VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s"
- // VirtualMachineSizeTypesStandardF16sV2 ...
- VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2"
- // VirtualMachineSizeTypesStandardF1s ...
- VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s"
- // VirtualMachineSizeTypesStandardF2 ...
- VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2"
- // VirtualMachineSizeTypesStandardF2s ...
- VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s"
- // VirtualMachineSizeTypesStandardF2sV2 ...
- VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2"
- // VirtualMachineSizeTypesStandardF32sV2 ...
- VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2"
- // VirtualMachineSizeTypesStandardF4 ...
- VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4"
- // VirtualMachineSizeTypesStandardF4s ...
- VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s"
- // VirtualMachineSizeTypesStandardF4sV2 ...
- VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2"
- // VirtualMachineSizeTypesStandardF64sV2 ...
- VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2"
- // VirtualMachineSizeTypesStandardF72sV2 ...
- VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2"
- // VirtualMachineSizeTypesStandardF8 ...
- VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8"
- // VirtualMachineSizeTypesStandardF8s ...
- VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s"
- // VirtualMachineSizeTypesStandardF8sV2 ...
- VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2"
- // VirtualMachineSizeTypesStandardG1 ...
- VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1"
- // VirtualMachineSizeTypesStandardG2 ...
- VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2"
- // VirtualMachineSizeTypesStandardG3 ...
- VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3"
- // VirtualMachineSizeTypesStandardG4 ...
- VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4"
- // VirtualMachineSizeTypesStandardG5 ...
- VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5"
- // VirtualMachineSizeTypesStandardGS1 ...
- VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1"
- // VirtualMachineSizeTypesStandardGS2 ...
- VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2"
- // VirtualMachineSizeTypesStandardGS3 ...
- VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3"
- // VirtualMachineSizeTypesStandardGS4 ...
- VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4"
- // VirtualMachineSizeTypesStandardGS44 ...
- VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4"
- // VirtualMachineSizeTypesStandardGS48 ...
- VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8"
- // VirtualMachineSizeTypesStandardGS5 ...
- VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5"
- // VirtualMachineSizeTypesStandardGS516 ...
- VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16"
- // VirtualMachineSizeTypesStandardGS58 ...
- VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8"
- // VirtualMachineSizeTypesStandardH16 ...
- VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16"
- // VirtualMachineSizeTypesStandardH16m ...
- VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m"
- // VirtualMachineSizeTypesStandardH16mr ...
- VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr"
- // VirtualMachineSizeTypesStandardH16r ...
- VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r"
- // VirtualMachineSizeTypesStandardH8 ...
- VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8"
- // VirtualMachineSizeTypesStandardH8m ...
- VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m"
- // VirtualMachineSizeTypesStandardL16s ...
- VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s"
- // VirtualMachineSizeTypesStandardL32s ...
- VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s"
- // VirtualMachineSizeTypesStandardL4s ...
- VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s"
- // VirtualMachineSizeTypesStandardL8s ...
- VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s"
- // VirtualMachineSizeTypesStandardM12832ms ...
- VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms"
- // VirtualMachineSizeTypesStandardM12864ms ...
- VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms"
- // VirtualMachineSizeTypesStandardM128ms ...
- VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = "Standard_M128ms"
- // VirtualMachineSizeTypesStandardM128s ...
- VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = "Standard_M128s"
- // VirtualMachineSizeTypesStandardM6416ms ...
- VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms"
- // VirtualMachineSizeTypesStandardM6432ms ...
- VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms"
- // VirtualMachineSizeTypesStandardM64ms ...
- VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = "Standard_M64ms"
- // VirtualMachineSizeTypesStandardM64s ...
- VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = "Standard_M64s"
- // VirtualMachineSizeTypesStandardNC12 ...
- VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12"
- // VirtualMachineSizeTypesStandardNC12sV2 ...
- VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2"
- // VirtualMachineSizeTypesStandardNC12sV3 ...
- VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3"
- // VirtualMachineSizeTypesStandardNC24 ...
- VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24"
- // VirtualMachineSizeTypesStandardNC24r ...
- VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r"
- // VirtualMachineSizeTypesStandardNC24rsV2 ...
- VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2"
- // VirtualMachineSizeTypesStandardNC24rsV3 ...
- VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3"
- // VirtualMachineSizeTypesStandardNC24sV2 ...
- VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2"
- // VirtualMachineSizeTypesStandardNC24sV3 ...
- VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3"
- // VirtualMachineSizeTypesStandardNC6 ...
- VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6"
- // VirtualMachineSizeTypesStandardNC6sV2 ...
- VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2"
- // VirtualMachineSizeTypesStandardNC6sV3 ...
- VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3"
- // VirtualMachineSizeTypesStandardND12s ...
- VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = "Standard_ND12s"
- // VirtualMachineSizeTypesStandardND24rs ...
- VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs"
- // VirtualMachineSizeTypesStandardND24s ...
- VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = "Standard_ND24s"
- // VirtualMachineSizeTypesStandardND6s ...
- VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = "Standard_ND6s"
- // VirtualMachineSizeTypesStandardNV12 ...
- VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12"
- // VirtualMachineSizeTypesStandardNV24 ...
- VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24"
- // VirtualMachineSizeTypesStandardNV6 ...
- VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6"
-)
-
-// PossibleVirtualMachineSizeTypesValues returns an array of possible values for the VirtualMachineSizeTypes const type.
-func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes {
- return []VirtualMachineSizeTypes{VirtualMachineSizeTypesBasicA0, VirtualMachineSizeTypesBasicA1, VirtualMachineSizeTypesBasicA2, VirtualMachineSizeTypesBasicA3, VirtualMachineSizeTypesBasicA4, VirtualMachineSizeTypesStandardA0, VirtualMachineSizeTypesStandardA1, VirtualMachineSizeTypesStandardA10, VirtualMachineSizeTypesStandardA11, VirtualMachineSizeTypesStandardA1V2, VirtualMachineSizeTypesStandardA2, VirtualMachineSizeTypesStandardA2mV2, VirtualMachineSizeTypesStandardA2V2, VirtualMachineSizeTypesStandardA3, VirtualMachineSizeTypesStandardA4, VirtualMachineSizeTypesStandardA4mV2, VirtualMachineSizeTypesStandardA4V2, VirtualMachineSizeTypesStandardA5, VirtualMachineSizeTypesStandardA6, VirtualMachineSizeTypesStandardA7, VirtualMachineSizeTypesStandardA8, VirtualMachineSizeTypesStandardA8mV2, VirtualMachineSizeTypesStandardA8V2, VirtualMachineSizeTypesStandardA9, VirtualMachineSizeTypesStandardB1ms, VirtualMachineSizeTypesStandardB1s, VirtualMachineSizeTypesStandardB2ms, VirtualMachineSizeTypesStandardB2s, VirtualMachineSizeTypesStandardB4ms, VirtualMachineSizeTypesStandardB8ms, VirtualMachineSizeTypesStandardD1, VirtualMachineSizeTypesStandardD11, VirtualMachineSizeTypesStandardD11V2, VirtualMachineSizeTypesStandardD12, VirtualMachineSizeTypesStandardD12V2, VirtualMachineSizeTypesStandardD13, VirtualMachineSizeTypesStandardD13V2, VirtualMachineSizeTypesStandardD14, VirtualMachineSizeTypesStandardD14V2, VirtualMachineSizeTypesStandardD15V2, VirtualMachineSizeTypesStandardD16sV3, VirtualMachineSizeTypesStandardD16V3, VirtualMachineSizeTypesStandardD1V2, VirtualMachineSizeTypesStandardD2, VirtualMachineSizeTypesStandardD2sV3, VirtualMachineSizeTypesStandardD2V2, VirtualMachineSizeTypesStandardD2V3, VirtualMachineSizeTypesStandardD3, VirtualMachineSizeTypesStandardD32sV3, VirtualMachineSizeTypesStandardD32V3, VirtualMachineSizeTypesStandardD3V2, VirtualMachineSizeTypesStandardD4, VirtualMachineSizeTypesStandardD4sV3, VirtualMachineSizeTypesStandardD4V2, VirtualMachineSizeTypesStandardD4V3, VirtualMachineSizeTypesStandardD5V2, VirtualMachineSizeTypesStandardD64sV3, VirtualMachineSizeTypesStandardD64V3, VirtualMachineSizeTypesStandardD8sV3, VirtualMachineSizeTypesStandardD8V3, VirtualMachineSizeTypesStandardDS1, VirtualMachineSizeTypesStandardDS11, VirtualMachineSizeTypesStandardDS11V2, VirtualMachineSizeTypesStandardDS12, VirtualMachineSizeTypesStandardDS12V2, VirtualMachineSizeTypesStandardDS13, VirtualMachineSizeTypesStandardDS132V2, VirtualMachineSizeTypesStandardDS134V2, VirtualMachineSizeTypesStandardDS13V2, VirtualMachineSizeTypesStandardDS14, VirtualMachineSizeTypesStandardDS144V2, VirtualMachineSizeTypesStandardDS148V2, VirtualMachineSizeTypesStandardDS14V2, VirtualMachineSizeTypesStandardDS15V2, VirtualMachineSizeTypesStandardDS1V2, VirtualMachineSizeTypesStandardDS2, VirtualMachineSizeTypesStandardDS2V2, VirtualMachineSizeTypesStandardDS3, VirtualMachineSizeTypesStandardDS3V2, VirtualMachineSizeTypesStandardDS4, VirtualMachineSizeTypesStandardDS4V2, VirtualMachineSizeTypesStandardDS5V2, VirtualMachineSizeTypesStandardE16sV3, VirtualMachineSizeTypesStandardE16V3, VirtualMachineSizeTypesStandardE2sV3, VirtualMachineSizeTypesStandardE2V3, VirtualMachineSizeTypesStandardE3216V3, VirtualMachineSizeTypesStandardE328sV3, VirtualMachineSizeTypesStandardE32sV3, VirtualMachineSizeTypesStandardE32V3, VirtualMachineSizeTypesStandardE4sV3, VirtualMachineSizeTypesStandardE4V3, VirtualMachineSizeTypesStandardE6416sV3, VirtualMachineSizeTypesStandardE6432sV3, VirtualMachineSizeTypesStandardE64sV3, VirtualMachineSizeTypesStandardE64V3, VirtualMachineSizeTypesStandardE8sV3, VirtualMachineSizeTypesStandardE8V3, VirtualMachineSizeTypesStandardF1, VirtualMachineSizeTypesStandardF16, VirtualMachineSizeTypesStandardF16s, VirtualMachineSizeTypesStandardF16sV2, VirtualMachineSizeTypesStandardF1s, VirtualMachineSizeTypesStandardF2, VirtualMachineSizeTypesStandardF2s, VirtualMachineSizeTypesStandardF2sV2, VirtualMachineSizeTypesStandardF32sV2, VirtualMachineSizeTypesStandardF4, VirtualMachineSizeTypesStandardF4s, VirtualMachineSizeTypesStandardF4sV2, VirtualMachineSizeTypesStandardF64sV2, VirtualMachineSizeTypesStandardF72sV2, VirtualMachineSizeTypesStandardF8, VirtualMachineSizeTypesStandardF8s, VirtualMachineSizeTypesStandardF8sV2, VirtualMachineSizeTypesStandardG1, VirtualMachineSizeTypesStandardG2, VirtualMachineSizeTypesStandardG3, VirtualMachineSizeTypesStandardG4, VirtualMachineSizeTypesStandardG5, VirtualMachineSizeTypesStandardGS1, VirtualMachineSizeTypesStandardGS2, VirtualMachineSizeTypesStandardGS3, VirtualMachineSizeTypesStandardGS4, VirtualMachineSizeTypesStandardGS44, VirtualMachineSizeTypesStandardGS48, VirtualMachineSizeTypesStandardGS5, VirtualMachineSizeTypesStandardGS516, VirtualMachineSizeTypesStandardGS58, VirtualMachineSizeTypesStandardH16, VirtualMachineSizeTypesStandardH16m, VirtualMachineSizeTypesStandardH16mr, VirtualMachineSizeTypesStandardH16r, VirtualMachineSizeTypesStandardH8, VirtualMachineSizeTypesStandardH8m, VirtualMachineSizeTypesStandardL16s, VirtualMachineSizeTypesStandardL32s, VirtualMachineSizeTypesStandardL4s, VirtualMachineSizeTypesStandardL8s, VirtualMachineSizeTypesStandardM12832ms, VirtualMachineSizeTypesStandardM12864ms, VirtualMachineSizeTypesStandardM128ms, VirtualMachineSizeTypesStandardM128s, VirtualMachineSizeTypesStandardM6416ms, VirtualMachineSizeTypesStandardM6432ms, VirtualMachineSizeTypesStandardM64ms, VirtualMachineSizeTypesStandardM64s, VirtualMachineSizeTypesStandardNC12, VirtualMachineSizeTypesStandardNC12sV2, VirtualMachineSizeTypesStandardNC12sV3, VirtualMachineSizeTypesStandardNC24, VirtualMachineSizeTypesStandardNC24r, VirtualMachineSizeTypesStandardNC24rsV2, VirtualMachineSizeTypesStandardNC24rsV3, VirtualMachineSizeTypesStandardNC24sV2, VirtualMachineSizeTypesStandardNC24sV3, VirtualMachineSizeTypesStandardNC6, VirtualMachineSizeTypesStandardNC6sV2, VirtualMachineSizeTypesStandardNC6sV3, VirtualMachineSizeTypesStandardND12s, VirtualMachineSizeTypesStandardND24rs, VirtualMachineSizeTypesStandardND24s, VirtualMachineSizeTypesStandardND6s, VirtualMachineSizeTypesStandardNV12, VirtualMachineSizeTypesStandardNV24, VirtualMachineSizeTypesStandardNV6}
-}
-
-// VMDiskTypes enumerates the values for vm disk types.
-type VMDiskTypes string
-
-const (
- // VMDiskTypesNone ...
- VMDiskTypesNone VMDiskTypes = "None"
- // VMDiskTypesUnmanaged ...
- VMDiskTypesUnmanaged VMDiskTypes = "Unmanaged"
-)
-
-// PossibleVMDiskTypesValues returns an array of possible values for the VMDiskTypes const type.
-func PossibleVMDiskTypesValues() []VMDiskTypes {
- return []VMDiskTypes{VMDiskTypesNone, VMDiskTypesUnmanaged}
-}
-
-// VMGuestPatchClassificationLinux enumerates the values for vm guest patch classification linux.
-type VMGuestPatchClassificationLinux string
-
-const (
- // VMGuestPatchClassificationLinuxCritical ...
- VMGuestPatchClassificationLinuxCritical VMGuestPatchClassificationLinux = "Critical"
- // VMGuestPatchClassificationLinuxOther ...
- VMGuestPatchClassificationLinuxOther VMGuestPatchClassificationLinux = "Other"
- // VMGuestPatchClassificationLinuxSecurity ...
- VMGuestPatchClassificationLinuxSecurity VMGuestPatchClassificationLinux = "Security"
-)
-
-// PossibleVMGuestPatchClassificationLinuxValues returns an array of possible values for the VMGuestPatchClassificationLinux const type.
-func PossibleVMGuestPatchClassificationLinuxValues() []VMGuestPatchClassificationLinux {
- return []VMGuestPatchClassificationLinux{VMGuestPatchClassificationLinuxCritical, VMGuestPatchClassificationLinuxOther, VMGuestPatchClassificationLinuxSecurity}
-}
-
-// VMGuestPatchClassificationWindows enumerates the values for vm guest patch classification windows.
-type VMGuestPatchClassificationWindows string
-
-const (
- // VMGuestPatchClassificationWindowsCritical ...
- VMGuestPatchClassificationWindowsCritical VMGuestPatchClassificationWindows = "Critical"
- // VMGuestPatchClassificationWindowsDefinition ...
- VMGuestPatchClassificationWindowsDefinition VMGuestPatchClassificationWindows = "Definition"
- // VMGuestPatchClassificationWindowsFeaturePack ...
- VMGuestPatchClassificationWindowsFeaturePack VMGuestPatchClassificationWindows = "FeaturePack"
- // VMGuestPatchClassificationWindowsSecurity ...
- VMGuestPatchClassificationWindowsSecurity VMGuestPatchClassificationWindows = "Security"
- // VMGuestPatchClassificationWindowsServicePack ...
- VMGuestPatchClassificationWindowsServicePack VMGuestPatchClassificationWindows = "ServicePack"
- // VMGuestPatchClassificationWindowsTools ...
- VMGuestPatchClassificationWindowsTools VMGuestPatchClassificationWindows = "Tools"
- // VMGuestPatchClassificationWindowsUpdateRollUp ...
- VMGuestPatchClassificationWindowsUpdateRollUp VMGuestPatchClassificationWindows = "UpdateRollUp"
- // VMGuestPatchClassificationWindowsUpdates ...
- VMGuestPatchClassificationWindowsUpdates VMGuestPatchClassificationWindows = "Updates"
-)
-
-// PossibleVMGuestPatchClassificationWindowsValues returns an array of possible values for the VMGuestPatchClassificationWindows const type.
-func PossibleVMGuestPatchClassificationWindowsValues() []VMGuestPatchClassificationWindows {
- return []VMGuestPatchClassificationWindows{VMGuestPatchClassificationWindowsCritical, VMGuestPatchClassificationWindowsDefinition, VMGuestPatchClassificationWindowsFeaturePack, VMGuestPatchClassificationWindowsSecurity, VMGuestPatchClassificationWindowsServicePack, VMGuestPatchClassificationWindowsTools, VMGuestPatchClassificationWindowsUpdateRollUp, VMGuestPatchClassificationWindowsUpdates}
-}
-
-// VMGuestPatchRebootBehavior enumerates the values for vm guest patch reboot behavior.
-type VMGuestPatchRebootBehavior string
-
-const (
- // VMGuestPatchRebootBehaviorAlwaysRequiresReboot ...
- VMGuestPatchRebootBehaviorAlwaysRequiresReboot VMGuestPatchRebootBehavior = "AlwaysRequiresReboot"
- // VMGuestPatchRebootBehaviorCanRequestReboot ...
- VMGuestPatchRebootBehaviorCanRequestReboot VMGuestPatchRebootBehavior = "CanRequestReboot"
- // VMGuestPatchRebootBehaviorNeverReboots ...
- VMGuestPatchRebootBehaviorNeverReboots VMGuestPatchRebootBehavior = "NeverReboots"
- // VMGuestPatchRebootBehaviorUnknown ...
- VMGuestPatchRebootBehaviorUnknown VMGuestPatchRebootBehavior = "Unknown"
-)
-
-// PossibleVMGuestPatchRebootBehaviorValues returns an array of possible values for the VMGuestPatchRebootBehavior const type.
-func PossibleVMGuestPatchRebootBehaviorValues() []VMGuestPatchRebootBehavior {
- return []VMGuestPatchRebootBehavior{VMGuestPatchRebootBehaviorAlwaysRequiresReboot, VMGuestPatchRebootBehaviorCanRequestReboot, VMGuestPatchRebootBehaviorNeverReboots, VMGuestPatchRebootBehaviorUnknown}
-}
-
-// VMGuestPatchRebootSetting enumerates the values for vm guest patch reboot setting.
-type VMGuestPatchRebootSetting string
-
-const (
- // VMGuestPatchRebootSettingAlways ...
- VMGuestPatchRebootSettingAlways VMGuestPatchRebootSetting = "Always"
- // VMGuestPatchRebootSettingIfRequired ...
- VMGuestPatchRebootSettingIfRequired VMGuestPatchRebootSetting = "IfRequired"
- // VMGuestPatchRebootSettingNever ...
- VMGuestPatchRebootSettingNever VMGuestPatchRebootSetting = "Never"
-)
-
-// PossibleVMGuestPatchRebootSettingValues returns an array of possible values for the VMGuestPatchRebootSetting const type.
-func PossibleVMGuestPatchRebootSettingValues() []VMGuestPatchRebootSetting {
- return []VMGuestPatchRebootSetting{VMGuestPatchRebootSettingAlways, VMGuestPatchRebootSettingIfRequired, VMGuestPatchRebootSettingNever}
-}
-
-// VMGuestPatchRebootStatus enumerates the values for vm guest patch reboot status.
-type VMGuestPatchRebootStatus string
-
-const (
- // VMGuestPatchRebootStatusCompleted ...
- VMGuestPatchRebootStatusCompleted VMGuestPatchRebootStatus = "Completed"
- // VMGuestPatchRebootStatusFailed ...
- VMGuestPatchRebootStatusFailed VMGuestPatchRebootStatus = "Failed"
- // VMGuestPatchRebootStatusNotNeeded ...
- VMGuestPatchRebootStatusNotNeeded VMGuestPatchRebootStatus = "NotNeeded"
- // VMGuestPatchRebootStatusRequired ...
- VMGuestPatchRebootStatusRequired VMGuestPatchRebootStatus = "Required"
- // VMGuestPatchRebootStatusStarted ...
- VMGuestPatchRebootStatusStarted VMGuestPatchRebootStatus = "Started"
- // VMGuestPatchRebootStatusUnknown ...
- VMGuestPatchRebootStatusUnknown VMGuestPatchRebootStatus = "Unknown"
-)
-
-// PossibleVMGuestPatchRebootStatusValues returns an array of possible values for the VMGuestPatchRebootStatus const type.
-func PossibleVMGuestPatchRebootStatusValues() []VMGuestPatchRebootStatus {
- return []VMGuestPatchRebootStatus{VMGuestPatchRebootStatusCompleted, VMGuestPatchRebootStatusFailed, VMGuestPatchRebootStatusNotNeeded, VMGuestPatchRebootStatusRequired, VMGuestPatchRebootStatusStarted, VMGuestPatchRebootStatusUnknown}
-}
-
-// WindowsPatchAssessmentMode enumerates the values for windows patch assessment mode.
-type WindowsPatchAssessmentMode string
-
-const (
- // WindowsPatchAssessmentModeAutomaticByPlatform ...
- WindowsPatchAssessmentModeAutomaticByPlatform WindowsPatchAssessmentMode = "AutomaticByPlatform"
- // WindowsPatchAssessmentModeImageDefault ...
- WindowsPatchAssessmentModeImageDefault WindowsPatchAssessmentMode = "ImageDefault"
-)
-
-// PossibleWindowsPatchAssessmentModeValues returns an array of possible values for the WindowsPatchAssessmentMode const type.
-func PossibleWindowsPatchAssessmentModeValues() []WindowsPatchAssessmentMode {
- return []WindowsPatchAssessmentMode{WindowsPatchAssessmentModeAutomaticByPlatform, WindowsPatchAssessmentModeImageDefault}
-}
-
-// WindowsVMGuestPatchMode enumerates the values for windows vm guest patch mode.
-type WindowsVMGuestPatchMode string
-
-const (
- // WindowsVMGuestPatchModeAutomaticByOS ...
- WindowsVMGuestPatchModeAutomaticByOS WindowsVMGuestPatchMode = "AutomaticByOS"
- // WindowsVMGuestPatchModeAutomaticByPlatform ...
- WindowsVMGuestPatchModeAutomaticByPlatform WindowsVMGuestPatchMode = "AutomaticByPlatform"
- // WindowsVMGuestPatchModeManual ...
- WindowsVMGuestPatchModeManual WindowsVMGuestPatchMode = "Manual"
-)
-
-// PossibleWindowsVMGuestPatchModeValues returns an array of possible values for the WindowsVMGuestPatchMode const type.
-func PossibleWindowsVMGuestPatchModeValues() []WindowsVMGuestPatchMode {
- return []WindowsVMGuestPatchMode{WindowsVMGuestPatchModeAutomaticByOS, WindowsVMGuestPatchModeAutomaticByPlatform, WindowsVMGuestPatchModeManual}
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go
deleted file mode 100644
index 9730526f7605..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleries.go
+++ /dev/null
@@ -1,584 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GalleriesClient is the compute Client
-type GalleriesClient struct {
- BaseClient
-}
-
-// NewGalleriesClient creates an instance of the GalleriesClient client.
-func NewGalleriesClient(subscriptionID string) GalleriesClient {
- return NewGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGalleriesClientWithBaseURI creates an instance of the GalleriesClient client using a custom endpoint. Use this
-// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient {
- return GalleriesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a Shared Image Gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery. The allowed characters are alphabets and numbers with
-// dots and periods allowed in the middle. The maximum length is 80 characters.
-// gallery - parameters supplied to the create or update Shared Image Gallery operation.
-func (client GalleriesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (result GalleriesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, gallery)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters),
- autorest.WithJSON(gallery),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future GalleriesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) CreateOrUpdateResponder(resp *http.Response) (result Gallery, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a Shared Image Gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery to be deleted.
-func (client GalleriesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleriesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a Shared Image Gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery.
-// selectParameter - the select expression to apply on the operation.
-func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (result Gallery, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, selectParameter)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, selectParameter SelectPermissions) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(selectParameter)) > 0 {
- queryParameters["$select"] = autorest.Encode("query", selectParameter)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) GetResponder(resp *http.Response) (result Gallery, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List list galleries under a subscription.
-func (client GalleriesClient) List(ctx context.Context) (result GalleryListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.List")
- defer func() {
- sc := -1
- if result.gl.Response.Response != nil {
- sc = result.gl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.gl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.gl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.gl.hasNextLink() && result.gl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) ListResponder(resp *http.Response) (result GalleryList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client GalleriesClient) listNextResults(ctx context.Context, lastResults GalleryList) (result GalleryList, err error) {
- req, err := lastResults.galleryListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleriesClient) ListComplete(ctx context.Context) (result GalleryListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListByResourceGroup list galleries under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client GalleriesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result GalleryListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.gl.Response.Response != nil {
- sc = result.gl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.gl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.gl, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.gl.hasNextLink() && result.gl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) ListByResourceGroupResponder(resp *http.Response) (result GalleryList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client GalleriesClient) listByResourceGroupNextResults(ctx context.Context, lastResults GalleryList) (result GalleryList, err error) {
- req, err := lastResults.galleryListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleriesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result GalleryListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// Update update a Shared Image Gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery. The allowed characters are alphabets and numbers with
-// dots and periods allowed in the middle. The maximum length is 80 characters.
-// gallery - parameters supplied to the update Shared Image Gallery operation.
-func (client GalleriesClient) Update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate) (result GalleriesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, gallery)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GalleriesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters),
- autorest.WithJSON(gallery),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleriesClient) UpdateSender(req *http.Request) (future GalleriesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GalleriesClient) UpdateResponder(resp *http.Response) (result Gallery, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go
deleted file mode 100644
index f496086291f8..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplications.go
+++ /dev/null
@@ -1,485 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GalleryApplicationsClient is the compute Client
-type GalleryApplicationsClient struct {
- BaseClient
-}
-
-// NewGalleryApplicationsClient creates an instance of the GalleryApplicationsClient client.
-func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClient {
- return NewGalleryApplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGalleryApplicationsClientWithBaseURI creates an instance of the GalleryApplicationsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient {
- return GalleryApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a gallery Application Definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be
-// created.
-// galleryApplicationName - the name of the gallery Application Definition to be created or updated. The
-// allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The
-// maximum length is 80 characters.
-// galleryApplication - parameters supplied to the create or update gallery Application operation.
-func (client GalleryApplicationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (result GalleryApplicationsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
- autorest.WithJSON(galleryApplication),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplication, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a gallery Application.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be
-// deleted.
-// galleryApplicationName - the name of the gallery Application Definition to be deleted.
-func (client GalleryApplicationsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future GalleryApplicationsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a gallery Application Definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery from which the Application Definitions are to be
-// retrieved.
-// galleryApplicationName - the name of the gallery Application Definition to be retrieved.
-func (client GalleryApplicationsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplication, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationsClient) GetResponder(resp *http.Response) (result GalleryApplication, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByGallery list gallery Application Definitions in a gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery from which Application Definitions are to be
-// listed.
-func (client GalleryApplicationsClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery")
- defer func() {
- sc := -1
- if result.gal.Response.Response != nil {
- sc = result.gal.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByGalleryNextResults
- req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByGallerySender(req)
- if err != nil {
- result.gal.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure sending request")
- return
- }
-
- result.gal, err = client.ListByGalleryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure responding to request")
- return
- }
- if result.gal.hasNextLink() && result.gal.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByGalleryPreparer prepares the ListByGallery request.
-func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByGallerySender sends the ListByGallery request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationsClient) ListByGallerySender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByGalleryResponder handles the response to the ListByGallery request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationsClient) ListByGalleryResponder(resp *http.Response) (result GalleryApplicationList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByGalleryNextResults retrieves the next set of results, if any.
-func (client GalleryApplicationsClient) listByGalleryNextResults(ctx context.Context, lastResults GalleryApplicationList) (result GalleryApplicationList, err error) {
- req, err := lastResults.galleryApplicationListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByGallerySender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByGalleryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleryApplicationsClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName)
- return
-}
-
-// Update update a gallery Application Definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be
-// updated.
-// galleryApplicationName - the name of the gallery Application Definition to be updated. The allowed
-// characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum
-// length is 80 characters.
-// galleryApplication - parameters supplied to the update gallery Application operation.
-func (client GalleryApplicationsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate) (result GalleryApplicationsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GalleryApplicationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
- autorest.WithJSON(galleryApplication),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationsClient) UpdateSender(req *http.Request) (future GalleryApplicationsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationsClient) UpdateResponder(resp *http.Response) (result GalleryApplication, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go
deleted file mode 100644
index 947fae104d74..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryapplicationversions.go
+++ /dev/null
@@ -1,516 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GalleryApplicationVersionsClient is the compute Client
-type GalleryApplicationVersionsClient struct {
- BaseClient
-}
-
-// NewGalleryApplicationVersionsClient creates an instance of the GalleryApplicationVersionsClient client.
-func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicationVersionsClient {
- return NewGalleryApplicationVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGalleryApplicationVersionsClientWithBaseURI creates an instance of the GalleryApplicationVersionsClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient {
- return GalleryApplicationVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a gallery Application Version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
-// galleryApplicationName - the name of the gallery Application Definition in which the Application Version is
-// to be created.
-// galleryApplicationVersionName - the name of the gallery Application Version to be created. Needs to follow
-// semantic version name pattern: The allowed characters are digit and period. Digits must be within the range
-// of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-// galleryApplicationVersion - parameters supplied to the create or update gallery Application Version
-// operation.
-func (client GalleryApplicationVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (result GalleryApplicationVersionsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: galleryApplicationVersion,
- Constraints: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source.MediaLink", Name: validation.Null, Rule: true, Chain: nil}}},
- {Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.ManageActions", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.ManageActions.Install", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.ManageActions.Remove", Name: validation.Null, Rule: true, Chain: nil},
- }},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.GalleryApplicationVersionsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
- autorest.WithJSON(galleryApplicationVersion),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationVersionsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a gallery Application Version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
-// galleryApplicationName - the name of the gallery Application Definition in which the Application Version
-// resides.
-// galleryApplicationVersionName - the name of the gallery Application Version to be deleted.
-func (client GalleryApplicationVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (result GalleryApplicationVersionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) (future GalleryApplicationVersionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a gallery Application Version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
-// galleryApplicationName - the name of the gallery Application Definition in which the Application Version
-// resides.
-// galleryApplicationVersionName - the name of the gallery Application Version to be retrieved.
-// expand - the expand expression to apply on the operation.
-func (client GalleryApplicationVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (result GalleryApplicationVersion, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationVersionsClient) GetResponder(resp *http.Response) (result GalleryApplicationVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByGalleryApplication list gallery Application Versions in a gallery Application Definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
-// galleryApplicationName - the name of the Shared Application Gallery Application Definition from which the
-// Application Versions are to be listed.
-func (client GalleryApplicationVersionsClient) ListByGalleryApplication(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication")
- defer func() {
- sc := -1
- if result.gavl.Response.Response != nil {
- sc = result.gavl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByGalleryApplicationNextResults
- req, err := client.ListByGalleryApplicationPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByGalleryApplicationSender(req)
- if err != nil {
- result.gavl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure sending request")
- return
- }
-
- result.gavl, err = client.ListByGalleryApplicationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure responding to request")
- return
- }
- if result.gavl.hasNextLink() && result.gavl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByGalleryApplicationPreparer prepares the ListByGalleryApplication request.
-func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByGalleryApplicationSender sends the ListByGalleryApplication request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationVersionsClient) ListByGalleryApplicationSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByGalleryApplicationResponder handles the response to the ListByGalleryApplication request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationVersionsClient) ListByGalleryApplicationResponder(resp *http.Response) (result GalleryApplicationVersionList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByGalleryApplicationNextResults retrieves the next set of results, if any.
-func (client GalleryApplicationVersionsClient) listByGalleryApplicationNextResults(ctx context.Context, lastResults GalleryApplicationVersionList) (result GalleryApplicationVersionList, err error) {
- req, err := lastResults.galleryApplicationVersionListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByGalleryApplicationSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByGalleryApplicationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByGalleryApplicationComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleryApplicationVersionsClient) ListByGalleryApplicationComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByGalleryApplication(ctx, resourceGroupName, galleryName, galleryApplicationName)
- return
-}
-
-// Update update a gallery Application Version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
-// galleryApplicationName - the name of the gallery Application Definition in which the Application Version is
-// to be updated.
-// galleryApplicationVersionName - the name of the gallery Application Version to be updated. Needs to follow
-// semantic version name pattern: The allowed characters are digit and period. Digits must be within the range
-// of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-// galleryApplicationVersion - parameters supplied to the update gallery Application Version operation.
-func (client GalleryApplicationVersionsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate) (result GalleryApplicationVersionsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GalleryApplicationVersionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
- "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
- autorest.WithJSON(galleryApplicationVersion),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryApplicationVersionsClient) UpdateSender(req *http.Request) (future GalleryApplicationVersionsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GalleryApplicationVersionsClient) UpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go
deleted file mode 100644
index bf16351b3ec6..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimages.go
+++ /dev/null
@@ -1,492 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GalleryImagesClient is the compute Client
-type GalleryImagesClient struct {
- BaseClient
-}
-
-// NewGalleryImagesClient creates an instance of the GalleryImagesClient client.
-func NewGalleryImagesClient(subscriptionID string) GalleryImagesClient {
- return NewGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGalleryImagesClientWithBaseURI creates an instance of the GalleryImagesClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) GalleryImagesClient {
- return GalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a gallery image definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be created.
-// galleryImageName - the name of the gallery image definition to be created or updated. The allowed characters
-// are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80
-// characters.
-// galleryImage - parameters supplied to the create or update gallery image operation.
-func (client GalleryImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (result GalleryImagesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: galleryImage,
- Constraints: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier.Publisher", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "galleryImage.GalleryImageProperties.Identifier.Offer", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "galleryImage.GalleryImageProperties.Identifier.Sku", Name: validation.Null, Rule: true, Chain: nil},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.GalleryImagesClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
- autorest.WithJSON(galleryImage),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (future GalleryImagesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client GalleryImagesClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a gallery image.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be deleted.
-// galleryImageName - the name of the gallery image definition to be deleted.
-func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImagesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImagesClient) DeleteSender(req *http.Request) (future GalleryImagesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client GalleryImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a gallery image definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery from which the Image Definitions are to be retrieved.
-// galleryImageName - the name of the gallery image definition to be retrieved.
-func (client GalleryImagesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client GalleryImagesClient) GetResponder(resp *http.Response) (result GalleryImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByGallery list gallery image definitions in a gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery from which Image Definitions are to be listed.
-func (client GalleryImagesClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.ListByGallery")
- defer func() {
- sc := -1
- if result.gil.Response.Response != nil {
- sc = result.gil.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByGalleryNextResults
- req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByGallerySender(req)
- if err != nil {
- result.gil.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure sending request")
- return
- }
-
- result.gil, err = client.ListByGalleryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure responding to request")
- return
- }
- if result.gil.hasNextLink() && result.gil.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByGalleryPreparer prepares the ListByGallery request.
-func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByGallerySender sends the ListByGallery request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImagesClient) ListByGallerySender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByGalleryResponder handles the response to the ListByGallery request. The method always
-// closes the http.Response Body.
-func (client GalleryImagesClient) ListByGalleryResponder(resp *http.Response) (result GalleryImageList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByGalleryNextResults retrieves the next set of results, if any.
-func (client GalleryImagesClient) listByGalleryNextResults(ctx context.Context, lastResults GalleryImageList) (result GalleryImageList, err error) {
- req, err := lastResults.galleryImageListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByGallerySender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByGalleryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleryImagesClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.ListByGallery")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName)
- return
-}
-
-// Update update a gallery image definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be updated.
-// galleryImageName - the name of the gallery image definition to be updated. The allowed characters are
-// alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80
-// characters.
-// galleryImage - parameters supplied to the update gallery image operation.
-func (client GalleryImagesClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate) (result GalleryImagesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GalleryImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
- autorest.WithJSON(galleryImage),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImagesClient) UpdateSender(req *http.Request) (future GalleryImagesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GalleryImagesClient) UpdateResponder(resp *http.Response) (result GalleryImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go
deleted file mode 100644
index 0ee7a395b9ea..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/galleryimageversions.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GalleryImageVersionsClient is the compute Client
-type GalleryImageVersionsClient struct {
- BaseClient
-}
-
-// NewGalleryImageVersionsClient creates an instance of the GalleryImageVersionsClient client.
-func NewGalleryImageVersionsClient(subscriptionID string) GalleryImageVersionsClient {
- return NewGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGalleryImageVersionsClientWithBaseURI creates an instance of the GalleryImageVersionsClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageVersionsClient {
- return GalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a gallery image version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
-// galleryImageName - the name of the gallery image definition in which the Image Version is to be created.
-// galleryImageVersionName - the name of the gallery image version to be created. Needs to follow semantic
-// version name pattern: The allowed characters are digit and period. Digits must be within the range of a
-// 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-// galleryImageVersion - parameters supplied to the create or update gallery image version operation.
-func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (result GalleryImageVersionsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: galleryImageVersion,
- Constraints: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.StorageProfile", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("compute.GalleryImageVersionsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithJSON(galleryImageVersion),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryImageVersionsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client GalleryImageVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a gallery image version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
-// galleryImageName - the name of the gallery image definition in which the Image Version resides.
-// galleryImageVersionName - the name of the gallery image version to be deleted.
-func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result GalleryImageVersionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future GalleryImageVersionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client GalleryImageVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a gallery image version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
-// galleryImageName - the name of the gallery image definition in which the Image Version resides.
-// galleryImageVersionName - the name of the gallery image version to be retrieved.
-// expand - the expand expression to apply on the operation.
-func (client GalleryImageVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (result GalleryImageVersion, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client GalleryImageVersionsClient) GetResponder(resp *http.Response) (result GalleryImageVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByGalleryImage list gallery image versions in a gallery image definition.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
-// galleryImageName - the name of the Shared Image Gallery Image Definition from which the Image Versions are
-// to be listed.
-func (client GalleryImageVersionsClient) ListByGalleryImage(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.ListByGalleryImage")
- defer func() {
- sc := -1
- if result.givl.Response.Response != nil {
- sc = result.givl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByGalleryImageNextResults
- req, err := client.ListByGalleryImagePreparer(ctx, resourceGroupName, galleryName, galleryImageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByGalleryImageSender(req)
- if err != nil {
- result.givl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure sending request")
- return
- }
-
- result.givl, err = client.ListByGalleryImageResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure responding to request")
- return
- }
- if result.givl.hasNextLink() && result.givl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByGalleryImagePreparer prepares the ListByGalleryImage request.
-func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByGalleryImageSender sends the ListByGalleryImage request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImageVersionsClient) ListByGalleryImageSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByGalleryImageResponder handles the response to the ListByGalleryImage request. The method always
-// closes the http.Response Body.
-func (client GalleryImageVersionsClient) ListByGalleryImageResponder(resp *http.Response) (result GalleryImageVersionList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByGalleryImageNextResults retrieves the next set of results, if any.
-func (client GalleryImageVersionsClient) listByGalleryImageNextResults(ctx context.Context, lastResults GalleryImageVersionList) (result GalleryImageVersionList, err error) {
- req, err := lastResults.galleryImageVersionListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByGalleryImageSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByGalleryImageResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByGalleryImageComplete enumerates all values, automatically crossing page boundaries as required.
-func (client GalleryImageVersionsClient) ListByGalleryImageComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.ListByGalleryImage")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByGalleryImage(ctx, resourceGroupName, galleryName, galleryImageName)
- return
-}
-
-// Update update a gallery image version.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
-// galleryImageName - the name of the gallery image definition in which the Image Version is to be updated.
-// galleryImageVersionName - the name of the gallery image version to be updated. Needs to follow semantic
-// version name pattern: The allowed characters are digit and period. Digits must be within the range of a
-// 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-// galleryImageVersion - parameters supplied to the update gallery image version operation.
-func (client GalleryImageVersionsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate) (result GalleryImageVersionsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GalleryImageVersionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithJSON(galleryImageVersion),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GalleryImageVersionsClient) UpdateSender(req *http.Request) (future GalleryImageVersionsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GalleryImageVersionsClient) UpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go
deleted file mode 100644
index eab53278bb40..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/gallerysharingprofile.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// GallerySharingProfileClient is the compute Client
-type GallerySharingProfileClient struct {
- BaseClient
-}
-
-// NewGallerySharingProfileClient creates an instance of the GallerySharingProfileClient client.
-func NewGallerySharingProfileClient(subscriptionID string) GallerySharingProfileClient {
- return NewGallerySharingProfileClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewGallerySharingProfileClientWithBaseURI creates an instance of the GallerySharingProfileClient client using a
-// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
-// Azure stack).
-func NewGallerySharingProfileClientWithBaseURI(baseURI string, subscriptionID string) GallerySharingProfileClient {
- return GallerySharingProfileClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Update update sharing profile of a gallery.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// galleryName - the name of the Shared Image Gallery.
-// sharingUpdate - parameters supplied to the update gallery sharing profile.
-func (client GallerySharingProfileClient) Update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate) (result GallerySharingProfileUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GallerySharingProfileClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, sharingUpdate)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client GallerySharingProfileClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryName": autorest.Encode("path", galleryName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/share", pathParameters),
- autorest.WithJSON(sharingUpdate),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client GallerySharingProfileClient) UpdateSender(req *http.Request) (future GallerySharingProfileUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client GallerySharingProfileClient) UpdateResponder(resp *http.Response) (result SharingUpdate, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go
deleted file mode 100644
index 66a9bcbd0c44..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/images.go
+++ /dev/null
@@ -1,583 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// ImagesClient is the compute Client
-type ImagesClient struct {
- BaseClient
-}
-
-// NewImagesClient creates an instance of the ImagesClient client.
-func NewImagesClient(subscriptionID string) ImagesClient {
- return NewImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewImagesClientWithBaseURI creates an instance of the ImagesClient client using a custom endpoint. Use this when
-// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesClient {
- return ImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update an image.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// imageName - the name of the image.
-// parameters - parameters supplied to the Create Image operation.
-func (client ImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image) (result ImagesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, imageName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, imageName string, parameters Image) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "imageName": autorest.Encode("path", imageName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result Image, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes an Image.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// imageName - the name of the image.
-func (client ImagesClient) Delete(ctx context.Context, resourceGroupName string, imageName string) (result ImagesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, imageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName string, imageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "imageName": autorest.Encode("path", imageName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets an image.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// imageName - the name of the image.
-// expand - the expand expression to apply on the operation.
-func (client ImagesClient) Get(ctx context.Context, resourceGroupName string, imageName string, expand string) (result Image, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, imageName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName string, imageName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "imageName": autorest.Encode("path", imageName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) GetResponder(resp *http.Response) (result Image, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets the list of Images in the subscription. Use nextLink property in the response to get the next page of
-// Images. Do this till nextLink is null to fetch all the Images.
-func (client ImagesClient) List(ctx context.Context) (result ImageListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.List")
- defer func() {
- sc := -1
- if result.ilr.Response.Response != nil {
- sc = result.ilr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.ilr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.ilr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) ListResponder(resp *http.Response) (result ImageListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client ImagesClient) listNextResults(ctx context.Context, lastResults ImageListResult) (result ImageListResult, err error) {
- req, err := lastResults.imageListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client ImagesClient) ListComplete(ctx context.Context) (result ImageListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListByResourceGroup gets the list of images under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client ImagesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ImageListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.ilr.Response.Response != nil {
- sc = result.ilr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.ilr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.ilr, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) ListByResourceGroupResponder(resp *http.Response) (result ImageListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client ImagesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ImageListResult) (result ImageListResult, err error) {
- req, err := lastResults.imageListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client ImagesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ImageListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// Update update an image.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// imageName - the name of the image.
-// parameters - parameters supplied to the Update Image operation.
-func (client ImagesClient) Update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate) (result ImagesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImagesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, imageName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "imageName": autorest.Encode("path", imageName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client ImagesClient) UpdateResponder(resp *http.Response) (result Image, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go
deleted file mode 100644
index ec30f9cc8c61..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/loganalytics.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// LogAnalyticsClient is the compute Client
-type LogAnalyticsClient struct {
- BaseClient
-}
-
-// NewLogAnalyticsClient creates an instance of the LogAnalyticsClient client.
-func NewLogAnalyticsClient(subscriptionID string) LogAnalyticsClient {
- return NewLogAnalyticsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewLogAnalyticsClientWithBaseURI creates an instance of the LogAnalyticsClient client using a custom endpoint. Use
-// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) LogAnalyticsClient {
- return LogAnalyticsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// ExportRequestRateByInterval export logs that show Api requests made by this subscription in the given time window to
-// show throttling activities.
-// Parameters:
-// parameters - parameters supplied to the LogAnalytics getRequestRateByInterval Api.
-// location - the location upon which virtual-machine-sizes is queried.
-func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context, parameters RequestRateByIntervalInput, location string) (result LogAnalyticsExportRequestRateByIntervalFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/LogAnalyticsClient.ExportRequestRateByInterval")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.LogAnalyticsClient", "ExportRequestRateByInterval", err.Error())
- }
-
- req, err := client.ExportRequestRateByIntervalPreparer(ctx, parameters, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ExportRequestRateByIntervalSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ExportRequestRateByIntervalPreparer prepares the ExportRequestRateByInterval request.
-func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context.Context, parameters RequestRateByIntervalInput, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the
-// http.Response Body if it receives an error.
-func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ExportRequestRateByIntervalResponder handles the response to the ExportRequestRateByInterval request. The method always
-// closes the http.Response Body.
-func (client LogAnalyticsClient) ExportRequestRateByIntervalResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ExportThrottledRequests export logs that show total throttled Api requests for this subscription in the given time
-// window.
-// Parameters:
-// parameters - parameters supplied to the LogAnalytics getThrottledRequests Api.
-// location - the location upon which virtual-machine-sizes is queried.
-func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, parameters ThrottledRequestsInput, location string) (result LogAnalyticsExportThrottledRequestsFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/LogAnalyticsClient.ExportThrottledRequests")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.LogAnalyticsClient", "ExportThrottledRequests", err.Error())
- }
-
- req, err := client.ExportThrottledRequestsPreparer(ctx, parameters, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ExportThrottledRequestsSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ExportThrottledRequestsPreparer prepares the ExportThrottledRequests request.
-func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Context, parameters ThrottledRequestsInput, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the
-// http.Response Body if it receives an error.
-func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ExportThrottledRequestsResponder handles the response to the ExportThrottledRequests request. The method always
-// closes the http.Response Body.
-func (client LogAnalyticsClient) ExportThrottledRequestsResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go
deleted file mode 100644
index 9718eb39f588..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/models.go
+++ /dev/null
@@ -1,23422 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "encoding/json"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/date"
- "github.com/Azure/go-autorest/autorest/to"
- "github.com/Azure/go-autorest/tracing"
- "io"
- "net/http"
-)
-
-// The package's fully qualified name.
-const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
-
-// AccessURI a disk access SAS uri.
-type AccessURI struct {
- autorest.Response `json:"-"`
- // AccessSAS - READ-ONLY; A SAS uri for accessing a disk.
- AccessSAS *string `json:"accessSAS,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for AccessURI.
-func (au AccessURI) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// AdditionalCapabilities enables or disables a capability on the virtual machine or virtual machine scale
-// set.
-type AdditionalCapabilities struct {
- // UltraSSDEnabled - The flag that enables or disables a capability to have one or more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine scale set only if this property is enabled.
- UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"`
- // HibernationEnabled - The flag that enables or disables hibernation capability on the VM.
- HibernationEnabled *bool `json:"hibernationEnabled,omitempty"`
-}
-
-// AdditionalUnattendContent specifies additional XML formatted information that can be included in the
-// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name,
-// and the pass in which the content is applied.
-type AdditionalUnattendContent struct {
- // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'PassNamesOobeSystem'
- PassName PassNames `json:"passName,omitempty"`
- // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'ComponentNamesMicrosoftWindowsShellSetup'
- ComponentName ComponentNames `json:"componentName,omitempty"`
- // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'SettingNamesAutoLogon', 'SettingNamesFirstLogonCommands'
- SettingName SettingNames `json:"settingName,omitempty"`
- // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted.
- Content *string `json:"content,omitempty"`
-}
-
-// APIEntityReference the API entity reference.
-type APIEntityReference struct {
- // ID - The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/...
- ID *string `json:"id,omitempty"`
-}
-
-// APIError api error.
-type APIError struct {
- // Details - The Api error details
- Details *[]APIErrorBase `json:"details,omitempty"`
- // Innererror - The Api inner error
- Innererror *InnerError `json:"innererror,omitempty"`
- // Code - The error code.
- Code *string `json:"code,omitempty"`
- // Target - The target of the particular error.
- Target *string `json:"target,omitempty"`
- // Message - The error message.
- Message *string `json:"message,omitempty"`
-}
-
-// APIErrorBase api error base.
-type APIErrorBase struct {
- // Code - The error code.
- Code *string `json:"code,omitempty"`
- // Target - The target of the particular error.
- Target *string `json:"target,omitempty"`
- // Message - The error message.
- Message *string `json:"message,omitempty"`
-}
-
-// ApplicationProfile contains the list of gallery applications that should be made available to the
-// VM/VMSS
-type ApplicationProfile struct {
- // GalleryApplications - Specifies the gallery applications that should be made available to the VM/VMSS
- GalleryApplications *[]VMGalleryApplication `json:"galleryApplications,omitempty"`
-}
-
-// AutomaticOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade.
-type AutomaticOSUpgradePolicy struct {
- // EnableAutomaticOSUpgrade - Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. Default value is false. If this is set to true for Windows based scale sets, [enableAutomaticUpdates](https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-dotnet) is automatically set to false and cannot be set to true.
- EnableAutomaticOSUpgrade *bool `json:"enableAutomaticOSUpgrade,omitempty"`
- // DisableAutomaticRollback - Whether OS image rollback feature should be disabled. Default value is false.
- DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty"`
-}
-
-// AutomaticOSUpgradeProperties describes automatic OS upgrade properties on the image.
-type AutomaticOSUpgradeProperties struct {
- // AutomaticOSUpgradeSupported - Specifies whether automatic OS upgrade is supported on the image.
- AutomaticOSUpgradeSupported *bool `json:"automaticOSUpgradeSupported,omitempty"`
-}
-
-// AutomaticRepairsPolicy specifies the configuration parameters for automatic repairs on the virtual
-// machine scale set.
-type AutomaticRepairsPolicy struct {
- // Enabled - Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false.
- Enabled *bool `json:"enabled,omitempty"`
- // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The minimum allowed grace period is 30 minutes (PT30M), which is also the default value. The maximum allowed grace period is 90 minutes (PT90M).
- GracePeriod *string `json:"gracePeriod,omitempty"`
-}
-
-// AvailabilitySet specifies information about the availability set that the virtual machine should be
-// assigned to. Virtual machines specified in the same availability set are allocated to different nodes to
-// maximize availability. For more information about availability sets, see [Availability sets
-// overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview). For
-// more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in
-// Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates). Currently, a
-// VM can only be added to availability set at creation time. An existing VM cannot be added to an
-// availability set.
-type AvailabilitySet struct {
- autorest.Response `json:"-"`
- *AvailabilitySetProperties `json:"properties,omitempty"`
- // Sku - Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
- Sku *Sku `json:"sku,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for AvailabilitySet.
-func (as AvailabilitySet) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if as.AvailabilitySetProperties != nil {
- objectMap["properties"] = as.AvailabilitySetProperties
- }
- if as.Sku != nil {
- objectMap["sku"] = as.Sku
- }
- if as.Location != nil {
- objectMap["location"] = as.Location
- }
- if as.Tags != nil {
- objectMap["tags"] = as.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for AvailabilitySet struct.
-func (as *AvailabilitySet) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var availabilitySetProperties AvailabilitySetProperties
- err = json.Unmarshal(*v, &availabilitySetProperties)
- if err != nil {
- return err
- }
- as.AvailabilitySetProperties = &availabilitySetProperties
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- as.Sku = &sku
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- as.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- as.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- as.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- as.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- as.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// AvailabilitySetListResult the List Availability Set operation response.
-type AvailabilitySetListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of availability sets
- Value *[]AvailabilitySet `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of AvailabilitySets. Call ListNext() with this URI to fetch the next page of AvailabilitySets.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// AvailabilitySetListResultIterator provides access to a complete listing of AvailabilitySet values.
-type AvailabilitySetListResultIterator struct {
- i int
- page AvailabilitySetListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *AvailabilitySetListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *AvailabilitySetListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter AvailabilitySetListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter AvailabilitySetListResultIterator) Response() AvailabilitySetListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter AvailabilitySetListResultIterator) Value() AvailabilitySet {
- if !iter.page.NotDone() {
- return AvailabilitySet{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the AvailabilitySetListResultIterator type.
-func NewAvailabilitySetListResultIterator(page AvailabilitySetListResultPage) AvailabilitySetListResultIterator {
- return AvailabilitySetListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (aslr AvailabilitySetListResult) IsEmpty() bool {
- return aslr.Value == nil || len(*aslr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (aslr AvailabilitySetListResult) hasNextLink() bool {
- return aslr.NextLink != nil && len(*aslr.NextLink) != 0
-}
-
-// availabilitySetListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (aslr AvailabilitySetListResult) availabilitySetListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !aslr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(aslr.NextLink)))
-}
-
-// AvailabilitySetListResultPage contains a page of AvailabilitySet values.
-type AvailabilitySetListResultPage struct {
- fn func(context.Context, AvailabilitySetListResult) (AvailabilitySetListResult, error)
- aslr AvailabilitySetListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *AvailabilitySetListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.aslr)
- if err != nil {
- return err
- }
- page.aslr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *AvailabilitySetListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page AvailabilitySetListResultPage) NotDone() bool {
- return !page.aslr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page AvailabilitySetListResultPage) Response() AvailabilitySetListResult {
- return page.aslr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page AvailabilitySetListResultPage) Values() []AvailabilitySet {
- if page.aslr.IsEmpty() {
- return nil
- }
- return *page.aslr.Value
-}
-
-// Creates a new instance of the AvailabilitySetListResultPage type.
-func NewAvailabilitySetListResultPage(cur AvailabilitySetListResult, getNextPage func(context.Context, AvailabilitySetListResult) (AvailabilitySetListResult, error)) AvailabilitySetListResultPage {
- return AvailabilitySetListResultPage{
- fn: getNextPage,
- aslr: cur,
- }
-}
-
-// AvailabilitySetProperties the instance view of a resource.
-type AvailabilitySetProperties struct {
- // PlatformUpdateDomainCount - Update Domain count.
- PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"`
- // PlatformFaultDomainCount - Fault Domain count.
- PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"`
- // VirtualMachines - A list of references to all virtual machines in the availability set.
- VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"`
- // ProximityPlacementGroup - Specifies information about the proximity placement group that the availability set should be assigned to. Minimum api-version: 2018-04-01.
- ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
- // Statuses - READ-ONLY; The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for AvailabilitySetProperties.
-func (asp AvailabilitySetProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if asp.PlatformUpdateDomainCount != nil {
- objectMap["platformUpdateDomainCount"] = asp.PlatformUpdateDomainCount
- }
- if asp.PlatformFaultDomainCount != nil {
- objectMap["platformFaultDomainCount"] = asp.PlatformFaultDomainCount
- }
- if asp.VirtualMachines != nil {
- objectMap["virtualMachines"] = asp.VirtualMachines
- }
- if asp.ProximityPlacementGroup != nil {
- objectMap["proximityPlacementGroup"] = asp.ProximityPlacementGroup
- }
- return json.Marshal(objectMap)
-}
-
-// AvailabilitySetUpdate specifies information about the availability set that the virtual machine should
-// be assigned to. Only tags may be updated.
-type AvailabilitySetUpdate struct {
- *AvailabilitySetProperties `json:"properties,omitempty"`
- // Sku - Sku of the availability set
- Sku *Sku `json:"sku,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for AvailabilitySetUpdate.
-func (asu AvailabilitySetUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if asu.AvailabilitySetProperties != nil {
- objectMap["properties"] = asu.AvailabilitySetProperties
- }
- if asu.Sku != nil {
- objectMap["sku"] = asu.Sku
- }
- if asu.Tags != nil {
- objectMap["tags"] = asu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for AvailabilitySetUpdate struct.
-func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var availabilitySetProperties AvailabilitySetProperties
- err = json.Unmarshal(*v, &availabilitySetProperties)
- if err != nil {
- return err
- }
- asu.AvailabilitySetProperties = &availabilitySetProperties
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- asu.Sku = &sku
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- asu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// AvailablePatchSummary describes the properties of an virtual machine instance view for available patch
-// summary.
-type AvailablePatchSummary struct {
- // Status - READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings'
- Status PatchOperationStatus `json:"status,omitempty"`
- // AssessmentActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs.
- AssessmentActivityID *string `json:"assessmentActivityId,omitempty"`
- // RebootPending - READ-ONLY; The overall reboot status of the VM. It will be true when partially installed patches require a reboot to complete installation but the reboot has not yet occurred.
- RebootPending *bool `json:"rebootPending,omitempty"`
- // CriticalAndSecurityPatchCount - READ-ONLY; The number of critical or security patches that have been detected as available and not yet installed.
- CriticalAndSecurityPatchCount *int32 `json:"criticalAndSecurityPatchCount,omitempty"`
- // OtherPatchCount - READ-ONLY; The number of all available patches excluding critical and security.
- OtherPatchCount *int32 `json:"otherPatchCount,omitempty"`
- // StartTime - READ-ONLY; The UTC timestamp when the operation began.
- StartTime *date.Time `json:"startTime,omitempty"`
- // LastModifiedTime - READ-ONLY; The UTC timestamp when the operation began.
- LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
- // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them.
- Error *APIError `json:"error,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for AvailablePatchSummary.
-func (aps AvailablePatchSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// BillingProfile specifies the billing related details of a Azure Spot VM or VMSS. Minimum
-// api-version: 2019-03-01.
-type BillingProfile struct {
- // MaxPrice - Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS. This price is in US Dollars. This price will be compared with the current Azure Spot price for the VM size. Also, the prices are compared at the time of create/update of Azure Spot VM/VMSS and the operation will only succeed if the maxPrice is greater than the current Azure Spot price. The maxPrice will also be used for evicting a Azure Spot VM/VMSS if the current Azure Spot price goes beyond the maxPrice after creation of VM/VMSS. Possible values are: any decimal value greater than zero (example: 0.01538), or -1, which indicates that the default price is up-to on-demand. You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you. Minimum api-version: 2019-03-01.
- MaxPrice *float64 `json:"maxPrice,omitempty"`
-}
-
-// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and
-// Screenshot to diagnose VM status. You can easily view the output of your console log.
-// Azure also enables you to see a screenshot of the VM from the hypervisor.
-type BootDiagnostics struct {
- // Enabled - Whether boot diagnostics should be enabled on the Virtual Machine.
- Enabled *bool `json:"enabled,omitempty"`
- // StorageURI - Uri of the storage account to use for placing the console output and screenshot. If storageUri is not specified while enabling boot diagnostics, managed storage will be used.
- StorageURI *string `json:"storageUri,omitempty"`
-}
-
-// BootDiagnosticsInstanceView the instance view of a virtual machine boot diagnostics.
-type BootDiagnosticsInstanceView struct {
- // ConsoleScreenshotBlobURI - READ-ONLY; The console screenshot blob URI. NOTE: This will **not** be set if boot diagnostics is currently enabled with managed storage.
- ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"`
- // SerialConsoleLogBlobURI - READ-ONLY; The serial console log blob Uri. NOTE: This will **not** be set if boot diagnostics is currently enabled with managed storage.
- SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"`
- // Status - READ-ONLY; The boot diagnostics status information for the VM. NOTE: It will be set only if there are errors encountered in enabling boot diagnostics.
- Status *InstanceViewStatus `json:"status,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for BootDiagnosticsInstanceView.
-func (bdiv BootDiagnosticsInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CapacityReservation specifies information about the capacity reservation.
-type CapacityReservation struct {
- autorest.Response `json:"-"`
- *CapacityReservationProperties `json:"properties,omitempty"`
- // Sku - SKU of the resource for which capacity needs be reserved. The SKU name and capacity is required to be set. Currently VM Skus with the capability called 'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.
- Sku *Sku `json:"sku,omitempty"`
- // Zones - Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. The zone can be assigned only during creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.
- Zones *[]string `json:"zones,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservation.
-func (cr CapacityReservation) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cr.CapacityReservationProperties != nil {
- objectMap["properties"] = cr.CapacityReservationProperties
- }
- if cr.Sku != nil {
- objectMap["sku"] = cr.Sku
- }
- if cr.Zones != nil {
- objectMap["zones"] = cr.Zones
- }
- if cr.Location != nil {
- objectMap["location"] = cr.Location
- }
- if cr.Tags != nil {
- objectMap["tags"] = cr.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CapacityReservation struct.
-func (cr *CapacityReservation) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var capacityReservationProperties CapacityReservationProperties
- err = json.Unmarshal(*v, &capacityReservationProperties)
- if err != nil {
- return err
- }
- cr.CapacityReservationProperties = &capacityReservationProperties
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- cr.Sku = &sku
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- cr.Zones = &zones
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- cr.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- cr.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- cr.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- cr.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- cr.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// CapacityReservationGroup specifies information about the capacity reservation group that the capacity
-// reservations should be assigned to. Currently, a capacity reservation can only be added to a
-// capacity reservation group at creation time. An existing capacity reservation cannot be added or moved
-// to another capacity reservation group.
-type CapacityReservationGroup struct {
- autorest.Response `json:"-"`
- *CapacityReservationGroupProperties `json:"properties,omitempty"`
- // Zones - Availability Zones to use for this capacity reservation group. The zones can be assigned only during creation. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.
- Zones *[]string `json:"zones,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationGroup.
-func (crg CapacityReservationGroup) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if crg.CapacityReservationGroupProperties != nil {
- objectMap["properties"] = crg.CapacityReservationGroupProperties
- }
- if crg.Zones != nil {
- objectMap["zones"] = crg.Zones
- }
- if crg.Location != nil {
- objectMap["location"] = crg.Location
- }
- if crg.Tags != nil {
- objectMap["tags"] = crg.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CapacityReservationGroup struct.
-func (crg *CapacityReservationGroup) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var capacityReservationGroupProperties CapacityReservationGroupProperties
- err = json.Unmarshal(*v, &capacityReservationGroupProperties)
- if err != nil {
- return err
- }
- crg.CapacityReservationGroupProperties = &capacityReservationGroupProperties
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- crg.Zones = &zones
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- crg.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- crg.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- crg.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- crg.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- crg.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// CapacityReservationGroupInstanceView ...
-type CapacityReservationGroupInstanceView struct {
- // CapacityReservations - READ-ONLY; List of instance view of the capacity reservations under the capacity reservation group.
- CapacityReservations *[]CapacityReservationInstanceViewWithName `json:"capacityReservations,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationGroupInstanceView.
-func (crgiv CapacityReservationGroupInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CapacityReservationGroupListResult the List capacity reservation group with resource group response.
-type CapacityReservationGroupListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of capacity reservation groups
- Value *[]CapacityReservationGroup `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of capacity reservation groups. Call ListNext() with this URI to fetch the next page of capacity reservation groups.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// CapacityReservationGroupListResultIterator provides access to a complete listing of
-// CapacityReservationGroup values.
-type CapacityReservationGroupListResultIterator struct {
- i int
- page CapacityReservationGroupListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *CapacityReservationGroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *CapacityReservationGroupListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter CapacityReservationGroupListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter CapacityReservationGroupListResultIterator) Response() CapacityReservationGroupListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter CapacityReservationGroupListResultIterator) Value() CapacityReservationGroup {
- if !iter.page.NotDone() {
- return CapacityReservationGroup{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the CapacityReservationGroupListResultIterator type.
-func NewCapacityReservationGroupListResultIterator(page CapacityReservationGroupListResultPage) CapacityReservationGroupListResultIterator {
- return CapacityReservationGroupListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (crglr CapacityReservationGroupListResult) IsEmpty() bool {
- return crglr.Value == nil || len(*crglr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (crglr CapacityReservationGroupListResult) hasNextLink() bool {
- return crglr.NextLink != nil && len(*crglr.NextLink) != 0
-}
-
-// capacityReservationGroupListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (crglr CapacityReservationGroupListResult) capacityReservationGroupListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !crglr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(crglr.NextLink)))
-}
-
-// CapacityReservationGroupListResultPage contains a page of CapacityReservationGroup values.
-type CapacityReservationGroupListResultPage struct {
- fn func(context.Context, CapacityReservationGroupListResult) (CapacityReservationGroupListResult, error)
- crglr CapacityReservationGroupListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *CapacityReservationGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationGroupListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.crglr)
- if err != nil {
- return err
- }
- page.crglr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *CapacityReservationGroupListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page CapacityReservationGroupListResultPage) NotDone() bool {
- return !page.crglr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page CapacityReservationGroupListResultPage) Response() CapacityReservationGroupListResult {
- return page.crglr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page CapacityReservationGroupListResultPage) Values() []CapacityReservationGroup {
- if page.crglr.IsEmpty() {
- return nil
- }
- return *page.crglr.Value
-}
-
-// Creates a new instance of the CapacityReservationGroupListResultPage type.
-func NewCapacityReservationGroupListResultPage(cur CapacityReservationGroupListResult, getNextPage func(context.Context, CapacityReservationGroupListResult) (CapacityReservationGroupListResult, error)) CapacityReservationGroupListResultPage {
- return CapacityReservationGroupListResultPage{
- fn: getNextPage,
- crglr: cur,
- }
-}
-
-// CapacityReservationGroupProperties capacity reservation group Properties.
-type CapacityReservationGroupProperties struct {
- // CapacityReservations - READ-ONLY; A list of all capacity reservation resource ids that belong to capacity reservation group.
- CapacityReservations *[]SubResourceReadOnly `json:"capacityReservations,omitempty"`
- // VirtualMachinesAssociated - READ-ONLY; A list of references to all virtual machines associated to the capacity reservation group.
- VirtualMachinesAssociated *[]SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty"`
- // InstanceView - READ-ONLY; The capacity reservation group instance view which has the list of instance views for all the capacity reservations that belong to the capacity reservation group.
- InstanceView *CapacityReservationGroupInstanceView `json:"instanceView,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationGroupProperties.
-func (crgp CapacityReservationGroupProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CapacityReservationGroupUpdate specifies information about the capacity reservation group. Only tags can
-// be updated.
-type CapacityReservationGroupUpdate struct {
- *CapacityReservationGroupProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationGroupUpdate.
-func (crgu CapacityReservationGroupUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if crgu.CapacityReservationGroupProperties != nil {
- objectMap["properties"] = crgu.CapacityReservationGroupProperties
- }
- if crgu.Tags != nil {
- objectMap["tags"] = crgu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CapacityReservationGroupUpdate struct.
-func (crgu *CapacityReservationGroupUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var capacityReservationGroupProperties CapacityReservationGroupProperties
- err = json.Unmarshal(*v, &capacityReservationGroupProperties)
- if err != nil {
- return err
- }
- crgu.CapacityReservationGroupProperties = &capacityReservationGroupProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- crgu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// CapacityReservationInstanceView the instance view of a capacity reservation that provides as snapshot of
-// the runtime properties of the capacity reservation that is managed by the platform and can change
-// outside of control plane operations.
-type CapacityReservationInstanceView struct {
- // UtilizationInfo - Unutilized capacity of the capacity reservation.
- UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// CapacityReservationInstanceViewWithName the instance view of a capacity reservation that includes the
-// name of the capacity reservation. It is used for the response to the instance view of a capacity
-// reservation group.
-type CapacityReservationInstanceViewWithName struct {
- // Name - READ-ONLY; The name of the capacity reservation.
- Name *string `json:"name,omitempty"`
- // UtilizationInfo - Unutilized capacity of the capacity reservation.
- UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationInstanceViewWithName.
-func (crivwn CapacityReservationInstanceViewWithName) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if crivwn.UtilizationInfo != nil {
- objectMap["utilizationInfo"] = crivwn.UtilizationInfo
- }
- if crivwn.Statuses != nil {
- objectMap["statuses"] = crivwn.Statuses
- }
- return json.Marshal(objectMap)
-}
-
-// CapacityReservationListResult the list capacity reservation operation response.
-type CapacityReservationListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of capacity reservations
- Value *[]CapacityReservation `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of capacity reservations. Call ListNext() with this URI to fetch the next page of capacity reservations.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// CapacityReservationListResultIterator provides access to a complete listing of CapacityReservation
-// values.
-type CapacityReservationListResultIterator struct {
- i int
- page CapacityReservationListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *CapacityReservationListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *CapacityReservationListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter CapacityReservationListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter CapacityReservationListResultIterator) Response() CapacityReservationListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter CapacityReservationListResultIterator) Value() CapacityReservation {
- if !iter.page.NotDone() {
- return CapacityReservation{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the CapacityReservationListResultIterator type.
-func NewCapacityReservationListResultIterator(page CapacityReservationListResultPage) CapacityReservationListResultIterator {
- return CapacityReservationListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (crlr CapacityReservationListResult) IsEmpty() bool {
- return crlr.Value == nil || len(*crlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (crlr CapacityReservationListResult) hasNextLink() bool {
- return crlr.NextLink != nil && len(*crlr.NextLink) != 0
-}
-
-// capacityReservationListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (crlr CapacityReservationListResult) capacityReservationListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !crlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(crlr.NextLink)))
-}
-
-// CapacityReservationListResultPage contains a page of CapacityReservation values.
-type CapacityReservationListResultPage struct {
- fn func(context.Context, CapacityReservationListResult) (CapacityReservationListResult, error)
- crlr CapacityReservationListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *CapacityReservationListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CapacityReservationListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.crlr)
- if err != nil {
- return err
- }
- page.crlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *CapacityReservationListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page CapacityReservationListResultPage) NotDone() bool {
- return !page.crlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page CapacityReservationListResultPage) Response() CapacityReservationListResult {
- return page.crlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page CapacityReservationListResultPage) Values() []CapacityReservation {
- if page.crlr.IsEmpty() {
- return nil
- }
- return *page.crlr.Value
-}
-
-// Creates a new instance of the CapacityReservationListResultPage type.
-func NewCapacityReservationListResultPage(cur CapacityReservationListResult, getNextPage func(context.Context, CapacityReservationListResult) (CapacityReservationListResult, error)) CapacityReservationListResultPage {
- return CapacityReservationListResultPage{
- fn: getNextPage,
- crlr: cur,
- }
-}
-
-// CapacityReservationProfile the parameters of a capacity reservation Profile.
-type CapacityReservationProfile struct {
- // CapacityReservationGroup - Specifies the capacity reservation group resource id that should be used for allocating the virtual machine or scaleset vm instances provided enough capacity has been reserved. Please refer to https://aka.ms/CapacityReservation for more details.
- CapacityReservationGroup *SubResource `json:"capacityReservationGroup,omitempty"`
-}
-
-// CapacityReservationProperties properties of the Capacity reservation.
-type CapacityReservationProperties struct {
- // ReservationID - READ-ONLY; A unique id generated and assigned to the capacity reservation by the platform which does not change throughout the lifetime of the resource.
- ReservationID *string `json:"reservationId,omitempty"`
- // VirtualMachinesAssociated - READ-ONLY; A list of all virtual machine resource ids that are associated with the capacity reservation.
- VirtualMachinesAssociated *[]SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty"`
- // ProvisioningTime - READ-ONLY; The date time when the capacity reservation was last updated.
- ProvisioningTime *date.Time `json:"provisioningTime,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // InstanceView - READ-ONLY; The Capacity reservation instance view.
- InstanceView *CapacityReservationInstanceView `json:"instanceView,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationProperties.
-func (crp CapacityReservationProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CapacityReservationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CapacityReservationsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CapacityReservationsClient) (CapacityReservation, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CapacityReservationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CapacityReservationsCreateOrUpdateFuture.Result.
-func (future *CapacityReservationsCreateOrUpdateFuture) result(client CapacityReservationsClient) (cr CapacityReservation, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- cr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent {
- cr, err = client.CreateOrUpdateResponder(cr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsCreateOrUpdateFuture", "Result", cr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// CapacityReservationsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CapacityReservationsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CapacityReservationsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CapacityReservationsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CapacityReservationsDeleteFuture.Result.
-func (future *CapacityReservationsDeleteFuture) result(client CapacityReservationsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CapacityReservationsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CapacityReservationsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CapacityReservationsClient) (CapacityReservation, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CapacityReservationsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CapacityReservationsUpdateFuture.Result.
-func (future *CapacityReservationsUpdateFuture) result(client CapacityReservationsClient) (cr CapacityReservation, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- cr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CapacityReservationsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent {
- cr, err = client.UpdateResponder(cr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CapacityReservationsUpdateFuture", "Result", cr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// CapacityReservationUpdate specifies information about the capacity reservation. Only tags and
-// sku.capacity can be updated.
-type CapacityReservationUpdate struct {
- *CapacityReservationProperties `json:"properties,omitempty"`
- // Sku - SKU of the resource for which capacity needs be reserved. The SKU name and capacity is required to be set. Currently VM Skus with the capability called 'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.
- Sku *Sku `json:"sku,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationUpdate.
-func (cru CapacityReservationUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cru.CapacityReservationProperties != nil {
- objectMap["properties"] = cru.CapacityReservationProperties
- }
- if cru.Sku != nil {
- objectMap["sku"] = cru.Sku
- }
- if cru.Tags != nil {
- objectMap["tags"] = cru.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CapacityReservationUpdate struct.
-func (cru *CapacityReservationUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var capacityReservationProperties CapacityReservationProperties
- err = json.Unmarshal(*v, &capacityReservationProperties)
- if err != nil {
- return err
- }
- cru.CapacityReservationProperties = &capacityReservationProperties
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- cru.Sku = &sku
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- cru.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// CapacityReservationUtilization represents the capacity reservation utilization in terms of resources
-// allocated.
-type CapacityReservationUtilization struct {
- // VirtualMachinesAllocated - READ-ONLY; A list of all virtual machines resource ids allocated against the capacity reservation.
- VirtualMachinesAllocated *[]SubResourceReadOnly `json:"virtualMachinesAllocated,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CapacityReservationUtilization.
-func (cru CapacityReservationUtilization) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CloudError an error response from the Compute service.
-type CloudError struct {
- Error *APIError `json:"error,omitempty"`
-}
-
-// CloudService describes the cloud service.
-type CloudService struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource Id.
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type.
- Type *string `json:"type,omitempty"`
- // Location - Resource location.
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags.
- Tags map[string]*string `json:"tags"`
- Properties *CloudServiceProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudService.
-func (cs CloudService) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cs.Location != nil {
- objectMap["location"] = cs.Location
- }
- if cs.Tags != nil {
- objectMap["tags"] = cs.Tags
- }
- if cs.Properties != nil {
- objectMap["properties"] = cs.Properties
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceExtensionProfile describes a cloud service extension profile.
-type CloudServiceExtensionProfile struct {
- // Extensions - List of extensions for the cloud service.
- Extensions *[]Extension `json:"extensions,omitempty"`
-}
-
-// CloudServiceExtensionProperties extension Properties.
-type CloudServiceExtensionProperties struct {
- // Publisher - The name of the extension handler publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Type - Specifies the type of the extension.
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the extension. Specifies the version of the extension. If this element is not specified or an asterisk (*) is used as the value, the latest version of the extension is used. If the value is specified with a major version number and an asterisk as the minor version number (X.), the latest minor version of the specified major version is selected. If a major version number and a minor version number are specified (X.Y), the specific extension version is selected. If a version is specified, an auto-upgrade is performed on the role instance.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // AutoUpgradeMinorVersion - Explicitly specify whether platform can automatically upgrade typeHandlerVersion to higher minor versions when they become available.
- AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
- // Settings - Public settings for the extension. For JSON extensions, this is the JSON settings for the extension. For XML Extension (like RDP), this is the XML setting for the extension.
- Settings *string `json:"settings,omitempty"`
- // ProtectedSettings - Protected settings for the extension which are encrypted before sent to the role instance.
- ProtectedSettings *string `json:"protectedSettings,omitempty"`
- ProtectedSettingsFromKeyVault *CloudServiceVaultAndSecretReference `json:"protectedSettingsFromKeyVault,omitempty"`
- // ForceUpdateTag - Tag to force apply the provided public and protected settings.
- // Changing the tag value allows for re-running the extension without changing any of the public or protected settings.
- // If forceUpdateTag is not changed, updates to public or protected settings would still be applied by the handler.
- // If neither forceUpdateTag nor any of public or protected settings change, extension would flow to the role instance with the same sequence-number, and
- // it is up to handler implementation whether to re-run it or not
- ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // RolesAppliedTo - Optional list of roles to apply this extension. If property is not specified or '*' is specified, extension is applied to all roles in the cloud service.
- RolesAppliedTo *[]string `json:"rolesAppliedTo,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceExtensionProperties.
-func (csep CloudServiceExtensionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if csep.Publisher != nil {
- objectMap["publisher"] = csep.Publisher
- }
- if csep.Type != nil {
- objectMap["type"] = csep.Type
- }
- if csep.TypeHandlerVersion != nil {
- objectMap["typeHandlerVersion"] = csep.TypeHandlerVersion
- }
- if csep.AutoUpgradeMinorVersion != nil {
- objectMap["autoUpgradeMinorVersion"] = csep.AutoUpgradeMinorVersion
- }
- if csep.Settings != nil {
- objectMap["settings"] = csep.Settings
- }
- if csep.ProtectedSettings != nil {
- objectMap["protectedSettings"] = csep.ProtectedSettings
- }
- if csep.ProtectedSettingsFromKeyVault != nil {
- objectMap["protectedSettingsFromKeyVault"] = csep.ProtectedSettingsFromKeyVault
- }
- if csep.ForceUpdateTag != nil {
- objectMap["forceUpdateTag"] = csep.ForceUpdateTag
- }
- if csep.RolesAppliedTo != nil {
- objectMap["rolesAppliedTo"] = csep.RolesAppliedTo
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceInstanceView instanceView of CloudService as a whole
-type CloudServiceInstanceView struct {
- autorest.Response `json:"-"`
- RoleInstance *InstanceViewStatusesSummary `json:"roleInstance,omitempty"`
- // SdkVersion - READ-ONLY; The version of the SDK that was used to generate the package for the cloud service.
- SdkVersion *string `json:"sdkVersion,omitempty"`
- // PrivateIds - READ-ONLY; Specifies a list of unique identifiers generated internally for the cloud service. NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details.
- PrivateIds *[]string `json:"privateIds,omitempty"`
- // Statuses - READ-ONLY
- Statuses *[]ResourceInstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceInstanceView.
-func (csiv CloudServiceInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if csiv.RoleInstance != nil {
- objectMap["roleInstance"] = csiv.RoleInstance
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceListResult ...
-type CloudServiceListResult struct {
- autorest.Response `json:"-"`
- Value *[]CloudService `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// CloudServiceListResultIterator provides access to a complete listing of CloudService values.
-type CloudServiceListResultIterator struct {
- i int
- page CloudServiceListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *CloudServiceListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *CloudServiceListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter CloudServiceListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter CloudServiceListResultIterator) Response() CloudServiceListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter CloudServiceListResultIterator) Value() CloudService {
- if !iter.page.NotDone() {
- return CloudService{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the CloudServiceListResultIterator type.
-func NewCloudServiceListResultIterator(page CloudServiceListResultPage) CloudServiceListResultIterator {
- return CloudServiceListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (cslr CloudServiceListResult) IsEmpty() bool {
- return cslr.Value == nil || len(*cslr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (cslr CloudServiceListResult) hasNextLink() bool {
- return cslr.NextLink != nil && len(*cslr.NextLink) != 0
-}
-
-// cloudServiceListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (cslr CloudServiceListResult) cloudServiceListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !cslr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(cslr.NextLink)))
-}
-
-// CloudServiceListResultPage contains a page of CloudService values.
-type CloudServiceListResultPage struct {
- fn func(context.Context, CloudServiceListResult) (CloudServiceListResult, error)
- cslr CloudServiceListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *CloudServiceListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.cslr)
- if err != nil {
- return err
- }
- page.cslr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *CloudServiceListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page CloudServiceListResultPage) NotDone() bool {
- return !page.cslr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page CloudServiceListResultPage) Response() CloudServiceListResult {
- return page.cslr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page CloudServiceListResultPage) Values() []CloudService {
- if page.cslr.IsEmpty() {
- return nil
- }
- return *page.cslr.Value
-}
-
-// Creates a new instance of the CloudServiceListResultPage type.
-func NewCloudServiceListResultPage(cur CloudServiceListResult, getNextPage func(context.Context, CloudServiceListResult) (CloudServiceListResult, error)) CloudServiceListResultPage {
- return CloudServiceListResultPage{
- fn: getNextPage,
- cslr: cur,
- }
-}
-
-// CloudServiceNetworkProfile network Profile for the cloud service.
-type CloudServiceNetworkProfile struct {
- // LoadBalancerConfigurations - List of Load balancer configurations. Cloud service can have up to two load balancer configurations, corresponding to a Public Load Balancer and an Internal Load Balancer.
- LoadBalancerConfigurations *[]LoadBalancerConfiguration `json:"loadBalancerConfigurations,omitempty"`
- // SwappableCloudService - The id reference of the cloud service containing the target IP with which the subject cloud service can perform a swap. This property cannot be updated once it is set. The swappable cloud service referred by this id must be present otherwise an error will be thrown.
- SwappableCloudService *SubResource `json:"swappableCloudService,omitempty"`
-}
-
-// CloudServiceOsProfile describes the OS profile for the cloud service.
-type CloudServiceOsProfile struct {
- // Secrets - Specifies set of certificates that should be installed onto the role instances.
- Secrets *[]CloudServiceVaultSecretGroup `json:"secrets,omitempty"`
-}
-
-// CloudServiceProperties cloud service properties
-type CloudServiceProperties struct {
- // PackageURL - Specifies a URL that refers to the location of the service package in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any storage account.
- // This is a write-only property and is not returned in GET calls.
- PackageURL *string `json:"packageUrl,omitempty"`
- // Configuration - Specifies the XML service configuration (.cscfg) for the cloud service.
- Configuration *string `json:"configuration,omitempty"`
- // ConfigurationURL - Specifies a URL that refers to the location of the service configuration in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any storage account.
- // This is a write-only property and is not returned in GET calls.
- ConfigurationURL *string `json:"configurationUrl,omitempty"`
- // StartCloudService - (Optional) Indicates whether to start the cloud service immediately after it is created. The default value is `true`.
- // If false, the service model is still deployed, but the code is not run immediately. Instead, the service is PoweredOff until you call Start, at which time the service will be started. A deployed service still incurs charges, even if it is poweredoff.
- StartCloudService *bool `json:"startCloudService,omitempty"`
- // AllowModelOverride - (Optional) Indicates whether the role sku properties (roleProfile.roles.sku) specified in the model/template should override the role instance count and vm size specified in the .cscfg and .csdef respectively.
- // The default value is `false`.
- AllowModelOverride *bool `json:"allowModelOverride,omitempty"`
- // UpgradeMode - Possible values include: 'CloudServiceUpgradeModeAuto', 'CloudServiceUpgradeModeManual', 'CloudServiceUpgradeModeSimultaneous'
- UpgradeMode CloudServiceUpgradeMode `json:"upgradeMode,omitempty"`
- RoleProfile *CloudServiceRoleProfile `json:"roleProfile,omitempty"`
- OsProfile *CloudServiceOsProfile `json:"osProfile,omitempty"`
- NetworkProfile *CloudServiceNetworkProfile `json:"networkProfile,omitempty"`
- ExtensionProfile *CloudServiceExtensionProfile `json:"extensionProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // UniqueID - READ-ONLY; The unique identifier for the cloud service.
- UniqueID *string `json:"uniqueId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceProperties.
-func (csp CloudServiceProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if csp.PackageURL != nil {
- objectMap["packageUrl"] = csp.PackageURL
- }
- if csp.Configuration != nil {
- objectMap["configuration"] = csp.Configuration
- }
- if csp.ConfigurationURL != nil {
- objectMap["configurationUrl"] = csp.ConfigurationURL
- }
- if csp.StartCloudService != nil {
- objectMap["startCloudService"] = csp.StartCloudService
- }
- if csp.AllowModelOverride != nil {
- objectMap["allowModelOverride"] = csp.AllowModelOverride
- }
- if csp.UpgradeMode != "" {
- objectMap["upgradeMode"] = csp.UpgradeMode
- }
- if csp.RoleProfile != nil {
- objectMap["roleProfile"] = csp.RoleProfile
- }
- if csp.OsProfile != nil {
- objectMap["osProfile"] = csp.OsProfile
- }
- if csp.NetworkProfile != nil {
- objectMap["networkProfile"] = csp.NetworkProfile
- }
- if csp.ExtensionProfile != nil {
- objectMap["extensionProfile"] = csp.ExtensionProfile
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceRole describes a role of the cloud service.
-type CloudServiceRole struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
- Sku *CloudServiceRoleSku `json:"sku,omitempty"`
- Properties *CloudServiceRoleProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceRole.
-func (csr CloudServiceRole) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if csr.Sku != nil {
- objectMap["sku"] = csr.Sku
- }
- if csr.Properties != nil {
- objectMap["properties"] = csr.Properties
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceRoleInstancesDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServiceRoleInstancesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServiceRoleInstancesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CloudServiceRoleInstancesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServiceRoleInstancesDeleteFuture.Result.
-func (future *CloudServiceRoleInstancesDeleteFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServiceRoleInstancesRebuildFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServiceRoleInstancesRebuildFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServiceRoleInstancesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CloudServiceRoleInstancesRebuildFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServiceRoleInstancesRebuildFuture.Result.
-func (future *CloudServiceRoleInstancesRebuildFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesRebuildFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesRebuildFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServiceRoleInstancesReimageFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServiceRoleInstancesReimageFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServiceRoleInstancesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CloudServiceRoleInstancesReimageFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServiceRoleInstancesReimageFuture.Result.
-func (future *CloudServiceRoleInstancesReimageFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesReimageFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesReimageFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServiceRoleInstancesRestartFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServiceRoleInstancesRestartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServiceRoleInstancesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *CloudServiceRoleInstancesRestartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServiceRoleInstancesRestartFuture.Result.
-func (future *CloudServiceRoleInstancesRestartFuture) result(client CloudServiceRoleInstancesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServiceRoleInstancesRestartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServiceRoleInstancesRestartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServiceRoleListResult ...
-type CloudServiceRoleListResult struct {
- autorest.Response `json:"-"`
- Value *[]CloudServiceRole `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// CloudServiceRoleListResultIterator provides access to a complete listing of CloudServiceRole values.
-type CloudServiceRoleListResultIterator struct {
- i int
- page CloudServiceRoleListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *CloudServiceRoleListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *CloudServiceRoleListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter CloudServiceRoleListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter CloudServiceRoleListResultIterator) Response() CloudServiceRoleListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter CloudServiceRoleListResultIterator) Value() CloudServiceRole {
- if !iter.page.NotDone() {
- return CloudServiceRole{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the CloudServiceRoleListResultIterator type.
-func NewCloudServiceRoleListResultIterator(page CloudServiceRoleListResultPage) CloudServiceRoleListResultIterator {
- return CloudServiceRoleListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (csrlr CloudServiceRoleListResult) IsEmpty() bool {
- return csrlr.Value == nil || len(*csrlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (csrlr CloudServiceRoleListResult) hasNextLink() bool {
- return csrlr.NextLink != nil && len(*csrlr.NextLink) != 0
-}
-
-// cloudServiceRoleListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (csrlr CloudServiceRoleListResult) cloudServiceRoleListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !csrlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(csrlr.NextLink)))
-}
-
-// CloudServiceRoleListResultPage contains a page of CloudServiceRole values.
-type CloudServiceRoleListResultPage struct {
- fn func(context.Context, CloudServiceRoleListResult) (CloudServiceRoleListResult, error)
- csrlr CloudServiceRoleListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *CloudServiceRoleListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CloudServiceRoleListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.csrlr)
- if err != nil {
- return err
- }
- page.csrlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *CloudServiceRoleListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page CloudServiceRoleListResultPage) NotDone() bool {
- return !page.csrlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page CloudServiceRoleListResultPage) Response() CloudServiceRoleListResult {
- return page.csrlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page CloudServiceRoleListResultPage) Values() []CloudServiceRole {
- if page.csrlr.IsEmpty() {
- return nil
- }
- return *page.csrlr.Value
-}
-
-// Creates a new instance of the CloudServiceRoleListResultPage type.
-func NewCloudServiceRoleListResultPage(cur CloudServiceRoleListResult, getNextPage func(context.Context, CloudServiceRoleListResult) (CloudServiceRoleListResult, error)) CloudServiceRoleListResultPage {
- return CloudServiceRoleListResultPage{
- fn: getNextPage,
- csrlr: cur,
- }
-}
-
-// CloudServiceRoleProfile describes the role profile for the cloud service.
-type CloudServiceRoleProfile struct {
- // Roles - List of roles for the cloud service.
- Roles *[]CloudServiceRoleProfileProperties `json:"roles,omitempty"`
-}
-
-// CloudServiceRoleProfileProperties describes the role properties.
-type CloudServiceRoleProfileProperties struct {
- // Name - Resource name.
- Name *string `json:"name,omitempty"`
- Sku *CloudServiceRoleSku `json:"sku,omitempty"`
-}
-
-// CloudServiceRoleProperties ...
-type CloudServiceRoleProperties struct {
- // UniqueID - READ-ONLY; Specifies the ID which uniquely identifies a cloud service role.
- UniqueID *string `json:"uniqueId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceRoleProperties.
-func (csrp CloudServiceRoleProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// CloudServiceRoleSku describes the cloud service role sku.
-type CloudServiceRoleSku struct {
- // Name - The sku name. NOTE: If the new SKU is not supported on the hardware the cloud service is currently on, you need to delete and recreate the cloud service or move back to the old sku.
- Name *string `json:"name,omitempty"`
- // Tier - Specifies the tier of the cloud service. Possible Values are
**Standard**
**Basic**
- Tier *string `json:"tier,omitempty"`
- // Capacity - Specifies the number of role instances in the cloud service.
- Capacity *int64 `json:"capacity,omitempty"`
-}
-
-// CloudServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServicesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (CloudService, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesCreateOrUpdateFuture.
-func (future *CloudServicesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesCreateOrUpdateFuture.Result.
-func (future *CloudServicesCreateOrUpdateFuture) result(client CloudServicesClient) (cs CloudService, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- cs.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent {
- cs, err = client.CreateOrUpdateResponder(cs.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
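The *Future types above follow the track-1 long-running-operation pattern: UnmarshalJSON rehydrates the poller and wires Result to the private result method. A minimal, illustrative sketch of how a caller typically waits on such a future follows; the import path is an assumption, and the client must be the same CloudServicesClient that issued the original CreateOrUpdate request.

```go
package main

import (
	"context"

	// Import path and API version are assumptions for illustration only.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// waitForCloudService blocks until the long-running operation behind the future
// reaches a terminal state, then retrieves the final CloudService through the
// Result hook wired up in UnmarshalJSON above.
func waitForCloudService(ctx context.Context, client compute.CloudServicesClient, future compute.CloudServicesCreateOrUpdateFuture) (compute.CloudService, error) {
	// WaitForCompletionRef polls the operation using the client's autorest.Client.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.CloudService{}, err
	}
	// Result re-fetches the resource and unmarshals it (see result() above).
	return future.Result(client)
}

func main() {}
```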
-// CloudServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesDeleteFuture.
-func (future *CloudServicesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesDeleteFuture.Result.
-func (future *CloudServicesDeleteFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesDeleteInstancesFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type CloudServicesDeleteInstancesFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesDeleteInstancesFuture.
-func (future *CloudServicesDeleteInstancesFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesDeleteInstancesFuture.Result.
-func (future *CloudServicesDeleteInstancesFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesDeleteInstancesFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesDeleteInstancesFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesPowerOffFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesPowerOffFuture.
-func (future *CloudServicesPowerOffFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesPowerOffFuture.Result.
-func (future *CloudServicesPowerOffFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesPowerOffFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesPowerOffFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesRebuildFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesRebuildFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesRebuildFuture.
-func (future *CloudServicesRebuildFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesRebuildFuture.Result.
-func (future *CloudServicesRebuildFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesRebuildFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesRebuildFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesReimageFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesReimageFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesReimageFuture.
-func (future *CloudServicesReimageFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesReimageFuture.Result.
-func (future *CloudServicesReimageFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesReimageFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesReimageFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesRestartFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesRestartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesRestartFuture.
-func (future *CloudServicesRestartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesRestartFuture.Result.
-func (future *CloudServicesRestartFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesRestartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesRestartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesStartFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesStartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesStartFuture.
-func (future *CloudServicesStartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesStartFuture.Result.
-func (future *CloudServicesStartFuture) result(client CloudServicesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesStartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesStartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesUpdateDomainWalkUpdateDomainFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type CloudServicesUpdateDomainWalkUpdateDomainFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesUpdateDomainClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesUpdateDomainWalkUpdateDomainFuture.
-func (future *CloudServicesUpdateDomainWalkUpdateDomainFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesUpdateDomainWalkUpdateDomainFuture.Result.
-func (future *CloudServicesUpdateDomainWalkUpdateDomainFuture) result(client CloudServicesUpdateDomainClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateDomainWalkUpdateDomainFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesUpdateDomainWalkUpdateDomainFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// CloudServicesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type CloudServicesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(CloudServicesClient) (CloudService, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CloudServicesUpdateFuture.
-func (future *CloudServicesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for CloudServicesUpdateFuture.Result.
-func (future *CloudServicesUpdateFuture) result(client CloudServicesClient) (cs CloudService, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- cs.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.CloudServicesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent {
- cs, err = client.UpdateResponder(cs.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.CloudServicesUpdateFuture", "Result", cs.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// CloudServiceUpdate ...
-type CloudServiceUpdate struct {
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for CloudServiceUpdate.
-func (csu CloudServiceUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if csu.Tags != nil {
- objectMap["tags"] = csu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// CloudServiceVaultAndSecretReference ...
-type CloudServiceVaultAndSecretReference struct {
- SourceVault *SubResource `json:"sourceVault,omitempty"`
- SecretURL *string `json:"secretUrl,omitempty"`
-}
-
-// CloudServiceVaultCertificate describes a single certificate reference in a Key Vault, and where the
-// certificate should reside on the role instance.
-type CloudServiceVaultCertificate struct {
- // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret.
- CertificateURL *string `json:"certificateUrl,omitempty"`
-}
-
-// CloudServiceVaultSecretGroup describes a set of certificates which are all in the same Key Vault.
-type CloudServiceVaultSecretGroup struct {
- // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates.
- SourceVault *SubResource `json:"sourceVault,omitempty"`
- // VaultCertificates - The list of key vault references in SourceVault which contain certificates.
- VaultCertificates *[]CloudServiceVaultCertificate `json:"vaultCertificates,omitempty"`
-}
-
-// CommunityGallery specifies information about the Community Gallery that you want to create or update.
-type CommunityGallery struct {
- autorest.Response `json:"-"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *CommunityGalleryIdentifier `json:"identifier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CommunityGallery.
-func (cg CommunityGallery) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cg.CommunityGalleryIdentifier != nil {
- objectMap["identifier"] = cg.CommunityGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CommunityGallery struct.
-func (cg *CommunityGallery) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- cg.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- cg.Location = &location
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- cg.Type = &typeVar
- }
- case "identifier":
- if v != nil {
- var communityGalleryIdentifier CommunityGalleryIdentifier
- err = json.Unmarshal(*v, &communityGalleryIdentifier)
- if err != nil {
- return err
- }
- cg.CommunityGalleryIdentifier = &communityGalleryIdentifier
- }
- }
- }
-
- return nil
-}
-
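CommunityGallery embeds *CommunityGalleryIdentifier, and the custom UnmarshalJSON above flattens the nested "identifier" object onto that embedded pointer. An illustrative round-trip follows; the import path is an assumption and the JSON payload is made up for the example.

```go
package main

import (
	"encoding/json"
	"fmt"

	// Import path and API version are assumptions for illustration only.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	// The wire format nests "identifier", but the custom UnmarshalJSON above
	// lifts it into the embedded *CommunityGalleryIdentifier, so UniqueID is
	// reachable directly on the CommunityGallery value.
	payload := []byte(`{"name":"myGallery","type":"Microsoft.Compute/galleries","identifier":{"uniqueId":"abc-123"}}`)

	var cg compute.CommunityGallery
	if err := json.Unmarshal(payload, &cg); err != nil {
		panic(err)
	}
	fmt.Println(*cg.Name, *cg.UniqueID) // myGallery abc-123
}
```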
-// CommunityGalleryIdentifier the identifier information of community gallery.
-type CommunityGalleryIdentifier struct {
- // UniqueID - The unique id of this community gallery.
- UniqueID *string `json:"uniqueId,omitempty"`
-}
-
-// CommunityGalleryImage specifies information about the gallery image definition that you want to create
-// or update.
-type CommunityGalleryImage struct {
- autorest.Response `json:"-"`
- *CommunityGalleryImageProperties `json:"properties,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *CommunityGalleryIdentifier `json:"identifier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CommunityGalleryImage.
-func (cgiVar CommunityGalleryImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cgiVar.CommunityGalleryImageProperties != nil {
- objectMap["properties"] = cgiVar.CommunityGalleryImageProperties
- }
- if cgiVar.CommunityGalleryIdentifier != nil {
- objectMap["identifier"] = cgiVar.CommunityGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CommunityGalleryImage struct.
-func (cgiVar *CommunityGalleryImage) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var communityGalleryImageProperties CommunityGalleryImageProperties
- err = json.Unmarshal(*v, &communityGalleryImageProperties)
- if err != nil {
- return err
- }
- cgiVar.CommunityGalleryImageProperties = &communityGalleryImageProperties
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- cgiVar.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- cgiVar.Location = &location
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- cgiVar.Type = &typeVar
- }
- case "identifier":
- if v != nil {
- var communityGalleryIdentifier CommunityGalleryIdentifier
- err = json.Unmarshal(*v, &communityGalleryIdentifier)
- if err != nil {
- return err
- }
- cgiVar.CommunityGalleryIdentifier = &communityGalleryIdentifier
- }
- }
- }
-
- return nil
-}
-
-// CommunityGalleryImageProperties describes the properties of a gallery image definition.
-type CommunityGalleryImageProperties struct {
- // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. Possible values are: **Windows**, **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
- OsState OperatingSystemStateTypes `json:"osState,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- Identifier *GalleryImageIdentifier `json:"identifier,omitempty"`
- Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
- Disallowed *Disallowed `json:"disallowed,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // Features - A list of gallery image features.
- Features *[]GalleryImageFeature `json:"features,omitempty"`
- PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"`
-}
-
-// CommunityGalleryImageVersion specifies information about the gallery image version that you want to
-// create or update.
-type CommunityGalleryImageVersion struct {
- autorest.Response `json:"-"`
- *CommunityGalleryImageVersionProperties `json:"properties,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *CommunityGalleryIdentifier `json:"identifier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CommunityGalleryImageVersion.
-func (cgiv CommunityGalleryImageVersion) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cgiv.CommunityGalleryImageVersionProperties != nil {
- objectMap["properties"] = cgiv.CommunityGalleryImageVersionProperties
- }
- if cgiv.CommunityGalleryIdentifier != nil {
- objectMap["identifier"] = cgiv.CommunityGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for CommunityGalleryImageVersion struct.
-func (cgiv *CommunityGalleryImageVersion) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var communityGalleryImageVersionProperties CommunityGalleryImageVersionProperties
- err = json.Unmarshal(*v, &communityGalleryImageVersionProperties)
- if err != nil {
- return err
- }
- cgiv.CommunityGalleryImageVersionProperties = &communityGalleryImageVersionProperties
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- cgiv.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- cgiv.Location = &location
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- cgiv.Type = &typeVar
- }
- case "identifier":
- if v != nil {
- var communityGalleryIdentifier CommunityGalleryIdentifier
- err = json.Unmarshal(*v, &communityGalleryIdentifier)
- if err != nil {
- return err
- }
- cgiv.CommunityGalleryIdentifier = &communityGalleryIdentifier
- }
- }
- }
-
- return nil
-}
-
-// CommunityGalleryImageVersionProperties describes the properties of a gallery image version.
-type CommunityGalleryImageVersionProperties struct {
- // PublishedDate - The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
-}
-
-// CreationData data used when creating a disk.
-type CreationData struct {
- // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'DiskCreateOptionEmpty', 'DiskCreateOptionAttach', 'DiskCreateOptionFromImage', 'DiskCreateOptionImport', 'DiskCreateOptionCopy', 'DiskCreateOptionRestore', 'DiskCreateOptionUpload', 'DiskCreateOptionCopyStart'
- CreateOption DiskCreateOption `json:"createOption,omitempty"`
- // StorageAccountID - Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk.
- StorageAccountID *string `json:"storageAccountId,omitempty"`
- // ImageReference - Disk source information.
- ImageReference *ImageDiskReference `json:"imageReference,omitempty"`
- // GalleryImageReference - Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image version from which to create a disk.
- GalleryImageReference *ImageDiskReference `json:"galleryImageReference,omitempty"`
- // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk.
- SourceURI *string `json:"sourceUri,omitempty"`
- // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk.
- SourceResourceID *string `json:"sourceResourceId,omitempty"`
- // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource.
- SourceUniqueID *string `json:"sourceUniqueId,omitempty"`
- // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
- UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"`
- // LogicalSectorSize - Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
- LogicalSectorSize *int32 `json:"logicalSectorSize,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for CreationData.
-func (cd CreationData) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if cd.CreateOption != "" {
- objectMap["createOption"] = cd.CreateOption
- }
- if cd.StorageAccountID != nil {
- objectMap["storageAccountId"] = cd.StorageAccountID
- }
- if cd.ImageReference != nil {
- objectMap["imageReference"] = cd.ImageReference
- }
- if cd.GalleryImageReference != nil {
- objectMap["galleryImageReference"] = cd.GalleryImageReference
- }
- if cd.SourceURI != nil {
- objectMap["sourceUri"] = cd.SourceURI
- }
- if cd.SourceResourceID != nil {
- objectMap["sourceResourceId"] = cd.SourceResourceID
- }
- if cd.UploadSizeBytes != nil {
- objectMap["uploadSizeBytes"] = cd.UploadSizeBytes
- }
- if cd.LogicalSectorSize != nil {
- objectMap["logicalSectorSize"] = cd.LogicalSectorSize
- }
- return json.Marshal(objectMap)
-}
-
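The custom MarshalJSON above deliberately skips SourceUniqueID because it is READ-ONLY on the wire. The illustrative check below is not part of the diff: the import path and the go-autorest `to` helpers are assumptions, and the expected output is inferred from the enum values listed in the CreateOption comment.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	// Import path and API version are assumptions for illustration only.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	cd := compute.CreationData{
		CreateOption: compute.DiskCreateOptionEmpty,
		// READ-ONLY on the wire: the custom MarshalJSON above never emits it,
		// so it is safe to leave populated on values that are round-tripped.
		SourceUniqueID: to.StringPtr("server-assigned"),
	}
	b, err := json.Marshal(cd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // expected: {"createOption":"Empty"} — sourceUniqueId is dropped
}
```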
-// DataDisk describes a data disk.
-type DataDisk struct {
- // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- Lun *int32 `json:"lun,omitempty"`
- // Name - The disk name.
- Name *string `json:"name,omitempty"`
- // Vhd - The virtual hard disk.
- Vhd *VirtualHardDisk `json:"vhd,omitempty"`
- // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist.
- Image *VirtualHardDisk `json:"image,omitempty"`
- // Caching - Specifies the caching requirements. Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // CreateOption - Specifies how the virtual machine should be created. Possible values are: **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine. **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
- CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
- // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // ManagedDisk - The managed disk parameters.
- ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
- // ToBeDetached - Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset
- ToBeDetached *bool `json:"toBeDetached,omitempty"`
- // DiskIOPSReadWrite - READ-ONLY; Specifies the Read-Write IOPS for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set.
- DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
- // DiskMBpsReadWrite - READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine Scale Set.
- DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
- // DetachOption - Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**. detachOption: **ForceDetach** is applicable only for managed data disks. If a previous detachment attempt of the data disk did not complete due to an unexpected failure from the virtual machine and the disk is still not released then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes might not have been flushed when using this detach behavior. This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. Possible values include: 'DiskDetachOptionTypesForceDetach'
- DetachOption DiskDetachOptionTypes `json:"detachOption,omitempty"`
- // DeleteOption - Specifies whether data disk should be deleted or detached upon VM deletion. Possible values: **Delete** If this value is used, the data disk is deleted when VM is deleted. **Detach** If this value is used, the data disk is retained after VM is deleted. The default value is set to **detach**. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
- DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DataDisk.
-func (dd DataDisk) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dd.Lun != nil {
- objectMap["lun"] = dd.Lun
- }
- if dd.Name != nil {
- objectMap["name"] = dd.Name
- }
- if dd.Vhd != nil {
- objectMap["vhd"] = dd.Vhd
- }
- if dd.Image != nil {
- objectMap["image"] = dd.Image
- }
- if dd.Caching != "" {
- objectMap["caching"] = dd.Caching
- }
- if dd.WriteAcceleratorEnabled != nil {
- objectMap["writeAcceleratorEnabled"] = dd.WriteAcceleratorEnabled
- }
- if dd.CreateOption != "" {
- objectMap["createOption"] = dd.CreateOption
- }
- if dd.DiskSizeGB != nil {
- objectMap["diskSizeGB"] = dd.DiskSizeGB
- }
- if dd.ManagedDisk != nil {
- objectMap["managedDisk"] = dd.ManagedDisk
- }
- if dd.ToBeDetached != nil {
- objectMap["toBeDetached"] = dd.ToBeDetached
- }
- if dd.DetachOption != "" {
- objectMap["detachOption"] = dd.DetachOption
- }
- if dd.DeleteOption != "" {
- objectMap["deleteOption"] = dd.DeleteOption
- }
- return json.Marshal(objectMap)
-}
-
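The DetachOption documentation above describes the force-detach flow: set toBeDetached to true and detachOption to ForceDetach in the same update. The small constructor below is only an illustration of building such a model, assuming the vendored import path and the go-autorest `to` helpers; it does not issue any API call.

```go
package main

import (
	"github.com/Azure/go-autorest/autorest/to"
	// Import path and API version are assumptions for illustration only.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// forceDetachDisk returns a DataDisk configured for the force-detach flow the
// DetachOption comment above describes: mark the disk as to-be-detached and
// request ForceDetach in the same update.
func forceDetachDisk(lun int32, name string) compute.DataDisk {
	return compute.DataDisk{
		Lun:          to.Int32Ptr(lun),
		Name:         to.StringPtr(name),
		CreateOption: compute.DiskCreateOptionTypesAttach,
		ToBeDetached: to.BoolPtr(true),
		DetachOption: compute.DiskDetachOptionTypesForceDetach,
	}
}

func main() {
	_ = forceDetachDisk(0, "data-disk-0")
}
```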
-// DataDiskImage contains the data disk images information.
-type DataDiskImage struct {
- // Lun - READ-ONLY; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- Lun *int32 `json:"lun,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DataDiskImage.
-func (ddi DataDiskImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// DataDiskImageEncryption contains encryption settings for a data disk image.
-type DataDiskImageEncryption struct {
- // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
- Lun *int32 `json:"lun,omitempty"`
- // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set.
- DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"`
-}
-
-// DedicatedHost specifies information about the Dedicated host.
-type DedicatedHost struct {
- autorest.Response `json:"-"`
- *DedicatedHostProperties `json:"properties,omitempty"`
- // Sku - SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. List Microsoft.Compute SKUs for a list of possible values.
- Sku *Sku `json:"sku,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHost.
-func (dh DedicatedHost) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dh.DedicatedHostProperties != nil {
- objectMap["properties"] = dh.DedicatedHostProperties
- }
- if dh.Sku != nil {
- objectMap["sku"] = dh.Sku
- }
- if dh.Location != nil {
- objectMap["location"] = dh.Location
- }
- if dh.Tags != nil {
- objectMap["tags"] = dh.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DedicatedHost struct.
-func (dh *DedicatedHost) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var dedicatedHostProperties DedicatedHostProperties
- err = json.Unmarshal(*v, &dedicatedHostProperties)
- if err != nil {
- return err
- }
- dh.DedicatedHostProperties = &dedicatedHostProperties
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- dh.Sku = &sku
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- dh.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- dh.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- dh.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- dh.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- dh.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// DedicatedHostAllocatableVM represents the dedicated host unutilized capacity in terms of a specific VM
-// size.
-type DedicatedHostAllocatableVM struct {
- // VMSize - VM size in terms of which the unutilized capacity is represented.
- VMSize *string `json:"vmSize,omitempty"`
- // Count - Maximum number of VMs of size vmSize that can fit in the dedicated host's remaining capacity.
- Count *float64 `json:"count,omitempty"`
-}
-
-// DedicatedHostAvailableCapacity dedicated host unutilized capacity.
-type DedicatedHostAvailableCapacity struct {
- // AllocatableVMs - The unutilized capacity of the dedicated host represented in terms of each VM size that is allowed to be deployed to the dedicated host.
- AllocatableVMs *[]DedicatedHostAllocatableVM `json:"allocatableVMs,omitempty"`
-}
-
-// DedicatedHostGroup specifies information about the dedicated host group that the dedicated hosts should
-// be assigned to.
-// Currently, a dedicated host can only be added to a dedicated host group at
-// creation time. An existing dedicated host cannot be added to another dedicated host group.
-type DedicatedHostGroup struct {
- autorest.Response `json:"-"`
- *DedicatedHostGroupProperties `json:"properties,omitempty"`
- // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone.
- Zones *[]string `json:"zones,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostGroup.
-func (dhg DedicatedHostGroup) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhg.DedicatedHostGroupProperties != nil {
- objectMap["properties"] = dhg.DedicatedHostGroupProperties
- }
- if dhg.Zones != nil {
- objectMap["zones"] = dhg.Zones
- }
- if dhg.Location != nil {
- objectMap["location"] = dhg.Location
- }
- if dhg.Tags != nil {
- objectMap["tags"] = dhg.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroup struct.
-func (dhg *DedicatedHostGroup) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var dedicatedHostGroupProperties DedicatedHostGroupProperties
- err = json.Unmarshal(*v, &dedicatedHostGroupProperties)
- if err != nil {
- return err
- }
- dhg.DedicatedHostGroupProperties = &dedicatedHostGroupProperties
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- dhg.Zones = &zones
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- dhg.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- dhg.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- dhg.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- dhg.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- dhg.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// DedicatedHostGroupInstanceView ...
-type DedicatedHostGroupInstanceView struct {
- // Hosts - List of instance view of the dedicated hosts under the dedicated host group.
- Hosts *[]DedicatedHostInstanceViewWithName `json:"hosts,omitempty"`
-}
-
-// DedicatedHostGroupListResult the List Dedicated Host Group with resource group response.
-type DedicatedHostGroupListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of dedicated host groups
- Value *[]DedicatedHostGroup `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of Dedicated Host Groups. Call ListNext() with this URI to fetch the next page of Dedicated Host Groups.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DedicatedHostGroupListResultIterator provides access to a complete listing of DedicatedHostGroup values.
-type DedicatedHostGroupListResultIterator struct {
- i int
- page DedicatedHostGroupListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DedicatedHostGroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DedicatedHostGroupListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DedicatedHostGroupListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DedicatedHostGroupListResultIterator) Response() DedicatedHostGroupListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DedicatedHostGroupListResultIterator) Value() DedicatedHostGroup {
- if !iter.page.NotDone() {
- return DedicatedHostGroup{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DedicatedHostGroupListResultIterator type.
-func NewDedicatedHostGroupListResultIterator(page DedicatedHostGroupListResultPage) DedicatedHostGroupListResultIterator {
- return DedicatedHostGroupListResultIterator{page: page}
-}
-
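The iterator types walk results item by item, in contrast to the page-at-a-time loop sketched earlier. Another purely illustrative example follows, under the same import-path assumption; where a real iterator comes from (typically one of the client's *Complete list methods) is likewise an assumption and not shown in this diff.

```go
package main

import (
	"context"

	// Import path and API version are assumptions for illustration only.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// listHostGroupNames walks a DedicatedHostGroupListResultIterator item by item
// using the generated NotDone/Value/NextWithContext helpers defined above.
func listHostGroupNames(ctx context.Context, iter compute.DedicatedHostGroupListResultIterator) ([]string, error) {
	var names []string
	for iter.NotDone() {
		if g := iter.Value(); g.Name != nil {
			names = append(names, *g.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return names, err
		}
	}
	return names, nil
}

func main() {
	// A zero-value iterator is used only so the sketch compiles.
	_, _ = listHostGroupNames(context.Background(), compute.DedicatedHostGroupListResultIterator{})
}
```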
-// IsEmpty returns true if the ListResult contains no values.
-func (dhglr DedicatedHostGroupListResult) IsEmpty() bool {
- return dhglr.Value == nil || len(*dhglr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (dhglr DedicatedHostGroupListResult) hasNextLink() bool {
- return dhglr.NextLink != nil && len(*dhglr.NextLink) != 0
-}
-
-// dedicatedHostGroupListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (dhglr DedicatedHostGroupListResult) dedicatedHostGroupListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !dhglr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(dhglr.NextLink)))
-}
-
-// DedicatedHostGroupListResultPage contains a page of DedicatedHostGroup values.
-type DedicatedHostGroupListResultPage struct {
- fn func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error)
- dhglr DedicatedHostGroupListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DedicatedHostGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostGroupListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.dhglr)
- if err != nil {
- return err
- }
- page.dhglr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DedicatedHostGroupListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DedicatedHostGroupListResultPage) NotDone() bool {
- return !page.dhglr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DedicatedHostGroupListResultPage) Response() DedicatedHostGroupListResult {
- return page.dhglr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DedicatedHostGroupListResultPage) Values() []DedicatedHostGroup {
- if page.dhglr.IsEmpty() {
- return nil
- }
- return *page.dhglr.Value
-}
-
-// Creates a new instance of the DedicatedHostGroupListResultPage type.
-func NewDedicatedHostGroupListResultPage(cur DedicatedHostGroupListResult, getNextPage func(context.Context, DedicatedHostGroupListResult) (DedicatedHostGroupListResult, error)) DedicatedHostGroupListResultPage {
- return DedicatedHostGroupListResultPage{
- fn: getNextPage,
- dhglr: cur,
- }
-}
-
-// DedicatedHostGroupProperties dedicated Host Group Properties.
-type DedicatedHostGroupProperties struct {
- // PlatformFaultDomainCount - Number of fault domains that the host group can span.
- PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"`
- // Hosts - READ-ONLY; A list of references to all dedicated hosts in the dedicated host group.
- Hosts *[]SubResourceReadOnly `json:"hosts,omitempty"`
- // InstanceView - READ-ONLY; The dedicated host group instance view, which has the list of instance view of the dedicated hosts under the dedicated host group.
- InstanceView *DedicatedHostGroupInstanceView `json:"instanceView,omitempty"`
- // SupportAutomaticPlacement - Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group. Automatic placement means resources are allocated on dedicated hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to 'false' when not provided. Minimum api-version: 2020-06-01.
- SupportAutomaticPlacement *bool `json:"supportAutomaticPlacement,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostGroupProperties.
-func (dhgp DedicatedHostGroupProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhgp.PlatformFaultDomainCount != nil {
- objectMap["platformFaultDomainCount"] = dhgp.PlatformFaultDomainCount
- }
- if dhgp.SupportAutomaticPlacement != nil {
- objectMap["supportAutomaticPlacement"] = dhgp.SupportAutomaticPlacement
- }
- return json.Marshal(objectMap)
-}
-
-// DedicatedHostGroupUpdate specifies information about the dedicated host group that the dedicated host
-// should be assigned to. Only tags may be updated.
-type DedicatedHostGroupUpdate struct {
- *DedicatedHostGroupProperties `json:"properties,omitempty"`
- // Zones - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. If not provided, the group supports all zones in the region. If provided, enforces each host in the group to be in the same zone.
- Zones *[]string `json:"zones,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostGroupUpdate.
-func (dhgu DedicatedHostGroupUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhgu.DedicatedHostGroupProperties != nil {
- objectMap["properties"] = dhgu.DedicatedHostGroupProperties
- }
- if dhgu.Zones != nil {
- objectMap["zones"] = dhgu.Zones
- }
- if dhgu.Tags != nil {
- objectMap["tags"] = dhgu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DedicatedHostGroupUpdate struct.
-func (dhgu *DedicatedHostGroupUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var dedicatedHostGroupProperties DedicatedHostGroupProperties
- err = json.Unmarshal(*v, &dedicatedHostGroupProperties)
- if err != nil {
- return err
- }
- dhgu.DedicatedHostGroupProperties = &dedicatedHostGroupProperties
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- dhgu.Zones = &zones
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- dhgu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// DedicatedHostInstanceView the instance view of a dedicated host.
-type DedicatedHostInstanceView struct {
- // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides.
- AssetID *string `json:"assetId,omitempty"`
- // AvailableCapacity - Unutilized capacity of the dedicated host.
- AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostInstanceView.
-func (dhiv DedicatedHostInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhiv.AvailableCapacity != nil {
- objectMap["availableCapacity"] = dhiv.AvailableCapacity
- }
- if dhiv.Statuses != nil {
- objectMap["statuses"] = dhiv.Statuses
- }
- return json.Marshal(objectMap)
-}
-
-// DedicatedHostInstanceViewWithName the instance view of a dedicated host that includes the name of the
-// dedicated host. It is used for the response to the instance view of a dedicated host group.
-type DedicatedHostInstanceViewWithName struct {
- // Name - READ-ONLY; The name of the dedicated host.
- Name *string `json:"name,omitempty"`
- // AssetID - READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides.
- AssetID *string `json:"assetId,omitempty"`
- // AvailableCapacity - Unutilized capacity of the dedicated host.
- AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostInstanceViewWithName.
-func (dhivwn DedicatedHostInstanceViewWithName) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhivwn.AvailableCapacity != nil {
- objectMap["availableCapacity"] = dhivwn.AvailableCapacity
- }
- if dhivwn.Statuses != nil {
- objectMap["statuses"] = dhivwn.Statuses
- }
- return json.Marshal(objectMap)
-}
-
-// DedicatedHostListResult the list dedicated host operation response.
-type DedicatedHostListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of dedicated hosts
- Value *[]DedicatedHost `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of dedicated hosts. Call ListNext() with this URI to fetch the next page of dedicated hosts.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DedicatedHostListResultIterator provides access to a complete listing of DedicatedHost values.
-type DedicatedHostListResultIterator struct {
- i int
- page DedicatedHostListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DedicatedHostListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DedicatedHostListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DedicatedHostListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DedicatedHostListResultIterator) Response() DedicatedHostListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DedicatedHostListResultIterator) Value() DedicatedHost {
- if !iter.page.NotDone() {
- return DedicatedHost{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DedicatedHostListResultIterator type.
-func NewDedicatedHostListResultIterator(page DedicatedHostListResultPage) DedicatedHostListResultIterator {
- return DedicatedHostListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (dhlr DedicatedHostListResult) IsEmpty() bool {
- return dhlr.Value == nil || len(*dhlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (dhlr DedicatedHostListResult) hasNextLink() bool {
- return dhlr.NextLink != nil && len(*dhlr.NextLink) != 0
-}
-
-// dedicatedHostListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (dhlr DedicatedHostListResult) dedicatedHostListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !dhlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(dhlr.NextLink)))
-}
-
-// DedicatedHostListResultPage contains a page of DedicatedHost values.
-type DedicatedHostListResultPage struct {
- fn func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error)
- dhlr DedicatedHostListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DedicatedHostListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DedicatedHostListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.dhlr)
- if err != nil {
- return err
- }
- page.dhlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DedicatedHostListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DedicatedHostListResultPage) NotDone() bool {
- return !page.dhlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DedicatedHostListResultPage) Response() DedicatedHostListResult {
- return page.dhlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DedicatedHostListResultPage) Values() []DedicatedHost {
- if page.dhlr.IsEmpty() {
- return nil
- }
- return *page.dhlr.Value
-}
-
-// Creates a new instance of the DedicatedHostListResultPage type.
-func NewDedicatedHostListResultPage(cur DedicatedHostListResult, getNextPage func(context.Context, DedicatedHostListResult) (DedicatedHostListResult, error)) DedicatedHostListResultPage {
- return DedicatedHostListResultPage{
- fn: getNextPage,
- dhlr: cur,
- }
-}
-
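
Editor's note: the page/iterator pair removed above follows the standard autorest enumeration pattern (IsEmpty, hasNextLink, the NextLink preparer, and lazy NextWithContext paging). A minimal sketch of how it is normally consumed is shown below; the ListByHostGroupComplete call and the upstream import path/API version are assumptions for illustration and are not part of this change.

package example

import (
	"context"

	// Import path and API version are assumptions; this diff only removes the vendored copy.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// listDedicatedHosts drains every page of a dedicated-host listing using the
// iterator deleted above: Value reads the current element, NotDone reports whether
// enumeration can continue, and NextWithContext follows the NextLink lazily.
func listDedicatedHosts(ctx context.Context, client compute.DedicatedHostsClient, resourceGroup, hostGroup string) ([]compute.DedicatedHost, error) {
	it, err := client.ListByHostGroupComplete(ctx, resourceGroup, hostGroup)
	if err != nil {
		return nil, err
	}
	var hosts []compute.DedicatedHost
	for it.NotDone() {
		hosts = append(hosts, it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return hosts, nil
}
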
-// DedicatedHostProperties properties of the dedicated host.
-type DedicatedHostProperties struct {
- // PlatformFaultDomain - Fault domain of the dedicated host within a dedicated host group.
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // AutoReplaceOnFailure - Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' when not provided.
- AutoReplaceOnFailure *bool `json:"autoReplaceOnFailure,omitempty"`
- // HostID - READ-ONLY; A unique id generated and assigned to the dedicated host by the platform. Does not change throughout the lifetime of the host.
- HostID *string `json:"hostId,omitempty"`
- // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the Dedicated Host.
- VirtualMachines *[]SubResourceReadOnly `json:"virtualMachines,omitempty"`
- // LicenseType - Specifies the software license type that will be applied to the VMs deployed on the dedicated host. Possible values are: **None**, **Windows_Server_Hybrid**, **Windows_Server_Perpetual**. Default: **None**. Possible values include: 'DedicatedHostLicenseTypesNone', 'DedicatedHostLicenseTypesWindowsServerHybrid', 'DedicatedHostLicenseTypesWindowsServerPerpetual'
- LicenseType DedicatedHostLicenseTypes `json:"licenseType,omitempty"`
- // ProvisioningTime - READ-ONLY; The date when the host was first provisioned.
- ProvisioningTime *date.Time `json:"provisioningTime,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // InstanceView - READ-ONLY; The dedicated host instance view.
- InstanceView *DedicatedHostInstanceView `json:"instanceView,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostProperties.
-func (dhp DedicatedHostProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhp.PlatformFaultDomain != nil {
- objectMap["platformFaultDomain"] = dhp.PlatformFaultDomain
- }
- if dhp.AutoReplaceOnFailure != nil {
- objectMap["autoReplaceOnFailure"] = dhp.AutoReplaceOnFailure
- }
- if dhp.LicenseType != "" {
- objectMap["licenseType"] = dhp.LicenseType
- }
- return json.Marshal(objectMap)
-}
-
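
Editor's note: the generated marshaler above deliberately skips the READ-ONLY fields (hostId, virtualMachines, provisioningTime, provisioningState, instanceView) so they are never sent back to ARM in a request body. A minimal, self-contained sketch of the same pattern, using a hypothetical exampleProperties type:

package main

import (
	"encoding/json"
	"fmt"
)

// exampleProperties is a hypothetical stand-in for DedicatedHostProperties.
type exampleProperties struct {
	PlatformFaultDomain *int32  `json:"platformFaultDomain,omitempty"`
	HostID              *string `json:"hostId,omitempty"` // READ-ONLY in the real model
}

// MarshalJSON mirrors the generated marshaler: only writable fields reach the wire.
func (p exampleProperties) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if p.PlatformFaultDomain != nil {
		objectMap["platformFaultDomain"] = p.PlatformFaultDomain
	}
	// hostId is deliberately left out, just as the generated code omits READ-ONLY fields.
	return json.Marshal(objectMap)
}

func main() {
	fd, id := int32(1), "abc123"
	b, _ := json.Marshal(exampleProperties{PlatformFaultDomain: &fd, HostID: &id})
	fmt.Println(string(b)) // prints {"platformFaultDomain":1}; hostId never leaves the client
}
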
-// DedicatedHostsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DedicatedHostsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DedicatedHostsClient) (DedicatedHost, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DedicatedHostsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DedicatedHostsCreateOrUpdateFuture.Result.
-func (future *DedicatedHostsCreateOrUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- dh.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent {
- dh, err = client.CreateOrUpdateResponder(dh.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsCreateOrUpdateFuture", "Result", dh.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
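
Editor's note: every *Future type removed in this file shares the same long-running-operation flow: the client call returns a future, WaitForCompletionRef polls DoneWithContext until the operation finishes, and the Result func (wired up by the UnmarshalJSON above) re-fetches and decodes the final resource. A hedged sketch under those assumptions, with the usual CreateOrUpdate signature and an assumed upstream import path:

package example

import (
	"context"

	// Import path and API version are assumptions for illustration only.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// createDedicatedHost drives a long-running CreateOrUpdate to completion.
func createDedicatedHost(ctx context.Context, client compute.DedicatedHostsClient,
	resourceGroup, hostGroup, hostName string, params compute.DedicatedHost) (compute.DedicatedHost, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, hostGroup, hostName, params)
	if err != nil {
		return compute.DedicatedHost{}, err
	}
	// Poll the operation until the service reports it as done (or ctx is cancelled).
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.DedicatedHost{}, err
	}
	// Result re-fetches the final resource and decodes it via CreateOrUpdateResponder.
	return future.Result(client)
}
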
-// DedicatedHostsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DedicatedHostsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DedicatedHostsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DedicatedHostsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DedicatedHostsDeleteFuture.Result.
-func (future *DedicatedHostsDeleteFuture) result(client DedicatedHostsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DedicatedHostsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DedicatedHostsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DedicatedHostsClient) (DedicatedHost, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DedicatedHostsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DedicatedHostsUpdateFuture.Result.
-func (future *DedicatedHostsUpdateFuture) result(client DedicatedHostsClient) (dh DedicatedHost, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- dh.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DedicatedHostsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if dh.Response.Response, err = future.GetResult(sender); err == nil && dh.Response.Response.StatusCode != http.StatusNoContent {
- dh, err = client.UpdateResponder(dh.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DedicatedHostsUpdateFuture", "Result", dh.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DedicatedHostUpdate specifies information about the dedicated host. Only tags, autoReplaceOnFailure and
-// licenseType may be updated.
-type DedicatedHostUpdate struct {
- *DedicatedHostProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DedicatedHostUpdate.
-func (dhu DedicatedHostUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dhu.DedicatedHostProperties != nil {
- objectMap["properties"] = dhu.DedicatedHostProperties
- }
- if dhu.Tags != nil {
- objectMap["tags"] = dhu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DedicatedHostUpdate struct.
-func (dhu *DedicatedHostUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var dedicatedHostProperties DedicatedHostProperties
- err = json.Unmarshal(*v, &dedicatedHostProperties)
- if err != nil {
- return err
- }
- dhu.DedicatedHostProperties = &dedicatedHostProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- dhu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
- // DiagnosticsProfile specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.
-type DiagnosticsProfile struct {
- // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. You can easily view the output of your console log. Azure also enables you to see a screenshot of the VM from the hypervisor.
- BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"`
-}
-
-// DiffDiskSettings describes the parameters of ephemeral disk settings that can be specified for operating
- // system disk. NOTE: The ephemeral disk settings can only be specified for managed disk.
-type DiffDiskSettings struct {
- // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'DiffDiskOptionsLocal'
- Option DiffDiskOptions `json:"option,omitempty"`
- // Placement - Specifies the ephemeral disk placement for operating system disk. Possible values are: **CacheDisk**, **ResourceDisk**. Default: **CacheDisk** if one is configured for the VM size otherwise **ResourceDisk** is used. Refer to VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. Possible values include: 'DiffDiskPlacementCacheDisk', 'DiffDiskPlacementResourceDisk'
- Placement DiffDiskPlacement `json:"placement,omitempty"`
-}
-
-// Disallowed describes the disallowed disk types.
-type Disallowed struct {
- // DiskTypes - A list of disk types.
- DiskTypes *[]string `json:"diskTypes,omitempty"`
-}
-
-// DisallowedConfiguration specifies the disallowed configuration for a virtual machine image.
-type DisallowedConfiguration struct {
- // VMDiskType - VM disk types which are disallowed. Possible values include: 'VMDiskTypesNone', 'VMDiskTypesUnmanaged'
- VMDiskType VMDiskTypes `json:"vmDiskType,omitempty"`
-}
-
-// Disk disk resource.
-type Disk struct {
- autorest.Response `json:"-"`
- // ManagedBy - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached.
- ManagedBy *string `json:"managedBy,omitempty"`
- // ManagedByExtended - READ-ONLY; List of relative URIs containing the IDs of the VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs.
- ManagedByExtended *[]string `json:"managedByExtended,omitempty"`
- Sku *DiskSku `json:"sku,omitempty"`
- // Zones - The Logical zone list for Disk.
- Zones *[]string `json:"zones,omitempty"`
- // ExtendedLocation - The extended location where the disk will be created. Extended location cannot be changed.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- *DiskProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for Disk.
-func (d Disk) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if d.Sku != nil {
- objectMap["sku"] = d.Sku
- }
- if d.Zones != nil {
- objectMap["zones"] = d.Zones
- }
- if d.ExtendedLocation != nil {
- objectMap["extendedLocation"] = d.ExtendedLocation
- }
- if d.DiskProperties != nil {
- objectMap["properties"] = d.DiskProperties
- }
- if d.Location != nil {
- objectMap["location"] = d.Location
- }
- if d.Tags != nil {
- objectMap["tags"] = d.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for Disk struct.
-func (d *Disk) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "managedBy":
- if v != nil {
- var managedBy string
- err = json.Unmarshal(*v, &managedBy)
- if err != nil {
- return err
- }
- d.ManagedBy = &managedBy
- }
- case "managedByExtended":
- if v != nil {
- var managedByExtended []string
- err = json.Unmarshal(*v, &managedByExtended)
- if err != nil {
- return err
- }
- d.ManagedByExtended = &managedByExtended
- }
- case "sku":
- if v != nil {
- var sku DiskSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- d.Sku = &sku
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- d.Zones = &zones
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- d.ExtendedLocation = &extendedLocation
- }
- case "properties":
- if v != nil {
- var diskProperties DiskProperties
- err = json.Unmarshal(*v, &diskProperties)
- if err != nil {
- return err
- }
- d.DiskProperties = &diskProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- d.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- d.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- d.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- d.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- d.Tags = tags
- }
- }
- }
-
- return nil
-}
-
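
Editor's note: the custom unmarshaler above exists because ARM nests mutable fields under a "properties" envelope while the generated Disk type flattens them into the top-level struct. A small sketch of the effect; the JSON payload is illustrative and the upstream import path/API version are assumed:

package main

import (
	"encoding/json"
	"fmt"

	// Import path and API version are assumptions for illustration only.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
	// Illustrative ARM-style payload: mutable fields live under "properties".
	payload := []byte(`{
		"location": "westeurope",
		"properties": {"diskSizeGB": 128, "diskState": "Unattached"}
	}`)
	var d compute.Disk
	if err := json.Unmarshal(payload, &d); err != nil {
		panic(err)
	}
	// The nested "properties" object was lifted into the embedded *DiskProperties,
	// so callers read the fields directly off the Disk value.
	fmt.Println(*d.Location, *d.DiskSizeGB, d.DiskState) // westeurope 128 Unattached
}
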
-// DiskAccess disk access resource.
-type DiskAccess struct {
- autorest.Response `json:"-"`
- *DiskAccessProperties `json:"properties,omitempty"`
- // ExtendedLocation - The extended location where the disk access will be created. Extended location cannot be changed.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DiskAccess.
-func (da DiskAccess) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if da.DiskAccessProperties != nil {
- objectMap["properties"] = da.DiskAccessProperties
- }
- if da.ExtendedLocation != nil {
- objectMap["extendedLocation"] = da.ExtendedLocation
- }
- if da.Location != nil {
- objectMap["location"] = da.Location
- }
- if da.Tags != nil {
- objectMap["tags"] = da.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiskAccess struct.
-func (da *DiskAccess) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var diskAccessProperties DiskAccessProperties
- err = json.Unmarshal(*v, &diskAccessProperties)
- if err != nil {
- return err
- }
- da.DiskAccessProperties = &diskAccessProperties
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- da.ExtendedLocation = &extendedLocation
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- da.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- da.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- da.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- da.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- da.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// DiskAccessesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskAccessesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskAccessesClient) (DiskAccess, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskAccessesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskAccessesCreateOrUpdateFuture.Result.
-func (future *DiskAccessesCreateOrUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- da.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent {
- da, err = client.CreateOrUpdateResponder(da.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesCreateOrUpdateFuture", "Result", da.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskAccessesDeleteAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type DiskAccessesDeleteAPrivateEndpointConnectionFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskAccessesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskAccessesDeleteAPrivateEndpointConnectionFuture.Result.
-func (future *DiskAccessesDeleteAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteAPrivateEndpointConnectionFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DiskAccessesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DiskAccessesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskAccessesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskAccessesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskAccessesDeleteFuture.Result.
-func (future *DiskAccessesDeleteFuture) result(client DiskAccessesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DiskAccessesUpdateAPrivateEndpointConnectionFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type DiskAccessesUpdateAPrivateEndpointConnectionFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskAccessesClient) (PrivateEndpointConnection, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskAccessesUpdateAPrivateEndpointConnectionFuture.Result.
-func (future *DiskAccessesUpdateAPrivateEndpointConnectionFuture) result(client DiskAccessesClient) (pec PrivateEndpointConnection, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- pec.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if pec.Response.Response, err = future.GetResult(sender); err == nil && pec.Response.Response.StatusCode != http.StatusNoContent {
- pec, err = client.UpdateAPrivateEndpointConnectionResponder(pec.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateAPrivateEndpointConnectionFuture", "Result", pec.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskAccessesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DiskAccessesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskAccessesClient) (DiskAccess, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskAccessesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskAccessesUpdateFuture.Result.
-func (future *DiskAccessesUpdateFuture) result(client DiskAccessesClient) (da DiskAccess, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- da.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskAccessesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if da.Response.Response, err = future.GetResult(sender); err == nil && da.Response.Response.StatusCode != http.StatusNoContent {
- da, err = client.UpdateResponder(da.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskAccessesUpdateFuture", "Result", da.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskAccessList the List disk access operation response.
-type DiskAccessList struct {
- autorest.Response `json:"-"`
- // Value - A list of disk access resources.
- Value *[]DiskAccess `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of disk access resources. Call ListNext() with this to fetch the next page of disk access resources.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DiskAccessListIterator provides access to a complete listing of DiskAccess values.
-type DiskAccessListIterator struct {
- i int
- page DiskAccessListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DiskAccessListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DiskAccessListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DiskAccessListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DiskAccessListIterator) Response() DiskAccessList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DiskAccessListIterator) Value() DiskAccess {
- if !iter.page.NotDone() {
- return DiskAccess{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DiskAccessListIterator type.
-func NewDiskAccessListIterator(page DiskAccessListPage) DiskAccessListIterator {
- return DiskAccessListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (dal DiskAccessList) IsEmpty() bool {
- return dal.Value == nil || len(*dal.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (dal DiskAccessList) hasNextLink() bool {
- return dal.NextLink != nil && len(*dal.NextLink) != 0
-}
-
-// diskAccessListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (dal DiskAccessList) diskAccessListPreparer(ctx context.Context) (*http.Request, error) {
- if !dal.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(dal.NextLink)))
-}
-
-// DiskAccessListPage contains a page of DiskAccess values.
-type DiskAccessListPage struct {
- fn func(context.Context, DiskAccessList) (DiskAccessList, error)
- dal DiskAccessList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DiskAccessListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskAccessListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.dal)
- if err != nil {
- return err
- }
- page.dal = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DiskAccessListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DiskAccessListPage) NotDone() bool {
- return !page.dal.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DiskAccessListPage) Response() DiskAccessList {
- return page.dal
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DiskAccessListPage) Values() []DiskAccess {
- if page.dal.IsEmpty() {
- return nil
- }
- return *page.dal.Value
-}
-
-// Creates a new instance of the DiskAccessListPage type.
-func NewDiskAccessListPage(cur DiskAccessList, getNextPage func(context.Context, DiskAccessList) (DiskAccessList, error)) DiskAccessListPage {
- return DiskAccessListPage{
- fn: getNextPage,
- dal: cur,
- }
-}
-
-// DiskAccessProperties ...
-type DiskAccessProperties struct {
- // PrivateEndpointConnections - READ-ONLY; A readonly collection of private endpoint connections created on the disk. Currently only one endpoint connection is supported.
- PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"`
- // ProvisioningState - READ-ONLY; The disk access resource provisioning state.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // TimeCreated - READ-ONLY; The time when the disk access was created.
- TimeCreated *date.Time `json:"timeCreated,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskAccessProperties.
-func (dap DiskAccessProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// DiskAccessUpdate used for updating a disk access resource.
-type DiskAccessUpdate struct {
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DiskAccessUpdate.
-func (dau DiskAccessUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dau.Tags != nil {
- objectMap["tags"] = dau.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// DiskEncryptionSet disk encryption set resource.
-type DiskEncryptionSet struct {
- autorest.Response `json:"-"`
- Identity *EncryptionSetIdentity `json:"identity,omitempty"`
- *EncryptionSetProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for DiskEncryptionSet.
-func (desVar DiskEncryptionSet) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if desVar.Identity != nil {
- objectMap["identity"] = desVar.Identity
- }
- if desVar.EncryptionSetProperties != nil {
- objectMap["properties"] = desVar.EncryptionSetProperties
- }
- if desVar.Location != nil {
- objectMap["location"] = desVar.Location
- }
- if desVar.Tags != nil {
- objectMap["tags"] = desVar.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSet struct.
-func (desVar *DiskEncryptionSet) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "identity":
- if v != nil {
- var identity EncryptionSetIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- desVar.Identity = &identity
- }
- case "properties":
- if v != nil {
- var encryptionSetProperties EncryptionSetProperties
- err = json.Unmarshal(*v, &encryptionSetProperties)
- if err != nil {
- return err
- }
- desVar.EncryptionSetProperties = &encryptionSetProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- desVar.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- desVar.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- desVar.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- desVar.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- desVar.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// DiskEncryptionSetList the List disk encryption set operation response.
-type DiskEncryptionSetList struct {
- autorest.Response `json:"-"`
- // Value - A list of disk encryption sets.
- Value *[]DiskEncryptionSet `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of disk encryption sets. Call ListNext() with this to fetch the next page of disk encryption sets.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DiskEncryptionSetListIterator provides access to a complete listing of DiskEncryptionSet values.
-type DiskEncryptionSetListIterator struct {
- i int
- page DiskEncryptionSetListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DiskEncryptionSetListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DiskEncryptionSetListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DiskEncryptionSetListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DiskEncryptionSetListIterator) Response() DiskEncryptionSetList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DiskEncryptionSetListIterator) Value() DiskEncryptionSet {
- if !iter.page.NotDone() {
- return DiskEncryptionSet{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DiskEncryptionSetListIterator type.
-func NewDiskEncryptionSetListIterator(page DiskEncryptionSetListPage) DiskEncryptionSetListIterator {
- return DiskEncryptionSetListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (desl DiskEncryptionSetList) IsEmpty() bool {
- return desl.Value == nil || len(*desl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (desl DiskEncryptionSetList) hasNextLink() bool {
- return desl.NextLink != nil && len(*desl.NextLink) != 0
-}
-
-// diskEncryptionSetListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (desl DiskEncryptionSetList) diskEncryptionSetListPreparer(ctx context.Context) (*http.Request, error) {
- if !desl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(desl.NextLink)))
-}
-
-// DiskEncryptionSetListPage contains a page of DiskEncryptionSet values.
-type DiskEncryptionSetListPage struct {
- fn func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error)
- desl DiskEncryptionSetList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DiskEncryptionSetListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskEncryptionSetListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.desl)
- if err != nil {
- return err
- }
- page.desl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DiskEncryptionSetListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DiskEncryptionSetListPage) NotDone() bool {
- return !page.desl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DiskEncryptionSetListPage) Response() DiskEncryptionSetList {
- return page.desl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DiskEncryptionSetListPage) Values() []DiskEncryptionSet {
- if page.desl.IsEmpty() {
- return nil
- }
- return *page.desl.Value
-}
-
-// Creates a new instance of the DiskEncryptionSetListPage type.
-func NewDiskEncryptionSetListPage(cur DiskEncryptionSetList, getNextPage func(context.Context, DiskEncryptionSetList) (DiskEncryptionSetList, error)) DiskEncryptionSetListPage {
- return DiskEncryptionSetListPage{
- fn: getNextPage,
- desl: cur,
- }
-}
-
-// DiskEncryptionSetParameters describes the parameter of customer managed disk encryption set resource id
- // that can be specified for disk. NOTE: The disk encryption set resource id can only be specified
-// for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details.
-type DiskEncryptionSetParameters struct {
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// DiskEncryptionSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskEncryptionSetsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskEncryptionSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskEncryptionSetsCreateOrUpdateFuture.Result.
-func (future *DiskEncryptionSetsCreateOrUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- desVar.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent {
- desVar, err = client.CreateOrUpdateResponder(desVar.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsCreateOrUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskEncryptionSetsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskEncryptionSetsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskEncryptionSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskEncryptionSetsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskEncryptionSetsDeleteFuture.Result.
-func (future *DiskEncryptionSetsDeleteFuture) result(client DiskEncryptionSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DiskEncryptionSetsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskEncryptionSetsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskEncryptionSetsClient) (DiskEncryptionSet, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskEncryptionSetsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskEncryptionSetsUpdateFuture.Result.
-func (future *DiskEncryptionSetsUpdateFuture) result(client DiskEncryptionSetsClient) (desVar DiskEncryptionSet, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- desVar.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskEncryptionSetsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if desVar.Response.Response, err = future.GetResult(sender); err == nil && desVar.Response.Response.StatusCode != http.StatusNoContent {
- desVar, err = client.UpdateResponder(desVar.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskEncryptionSetsUpdateFuture", "Result", desVar.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskEncryptionSettings describes a Encryption Settings for a Disk
-type DiskEncryptionSettings struct {
- // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret.
- DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"`
- // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault.
- KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"`
- // Enabled - Specifies whether disk encryption should be enabled on the virtual machine.
- Enabled *bool `json:"enabled,omitempty"`
-}
-
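
Editor's note: a hedged sketch of how the DiskEncryptionSettings block above is typically populated for Azure Disk Encryption on an OS disk; the vault ID and key/secret URLs are caller-supplied placeholders and the import path/API version are assumed:

package example

// Import path and API version are assumptions for illustration only.
import compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"

// adeSettings assembles the encryption settings carried on an OS disk: the disk
// encryption key is a Key Vault secret, optionally wrapped by a key encryption key.
func adeSettings(vaultID, secretURL, keyURL string) compute.DiskEncryptionSettings {
	enabled := true
	return compute.DiskEncryptionSettings{
		Enabled: &enabled,
		DiskEncryptionKey: &compute.KeyVaultSecretReference{
			SecretURL:   &secretURL,
			SourceVault: &compute.SubResource{ID: &vaultID},
		},
		KeyEncryptionKey: &compute.KeyVaultKeyReference{
			KeyURL:      &keyURL,
			SourceVault: &compute.SubResource{ID: &vaultID},
		},
	}
}
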
-// DiskEncryptionSetUpdate disk encryption set update resource.
-type DiskEncryptionSetUpdate struct {
- *DiskEncryptionSetUpdateProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
- Identity *EncryptionSetIdentity `json:"identity,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskEncryptionSetUpdate.
-func (desu DiskEncryptionSetUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if desu.DiskEncryptionSetUpdateProperties != nil {
- objectMap["properties"] = desu.DiskEncryptionSetUpdateProperties
- }
- if desu.Tags != nil {
- objectMap["tags"] = desu.Tags
- }
- if desu.Identity != nil {
- objectMap["identity"] = desu.Identity
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiskEncryptionSetUpdate struct.
-func (desu *DiskEncryptionSetUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var diskEncryptionSetUpdateProperties DiskEncryptionSetUpdateProperties
- err = json.Unmarshal(*v, &diskEncryptionSetUpdateProperties)
- if err != nil {
- return err
- }
- desu.DiskEncryptionSetUpdateProperties = &diskEncryptionSetUpdateProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- desu.Tags = tags
- }
- case "identity":
- if v != nil {
- var identity EncryptionSetIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- desu.Identity = &identity
- }
- }
- }
-
- return nil
-}
-
-// DiskEncryptionSetUpdateProperties disk encryption set resource update properties.
-type DiskEncryptionSetUpdateProperties struct {
- // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys'
- EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"`
- ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"`
- // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version.
- RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"`
-}
-
-// DiskImageEncryption this is the disk image encryption base class.
-type DiskImageEncryption struct {
- // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set.
- DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"`
-}
-
-// DiskInstanceView the instance view of the disk.
-type DiskInstanceView struct {
- // Name - The disk name.
- Name *string `json:"name,omitempty"`
- // EncryptionSettings - Specifies the encryption settings for the OS Disk. Minimum api-version: 2015-06-15
- EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// DiskList the List Disks operation response.
-type DiskList struct {
- autorest.Response `json:"-"`
- // Value - A list of disks.
- Value *[]Disk `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DiskListIterator provides access to a complete listing of Disk values.
-type DiskListIterator struct {
- i int
- page DiskListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DiskListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DiskListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DiskListIterator) Response() DiskList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DiskListIterator) Value() Disk {
- if !iter.page.NotDone() {
- return Disk{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DiskListIterator type.
-func NewDiskListIterator(page DiskListPage) DiskListIterator {
- return DiskListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (dl DiskList) IsEmpty() bool {
- return dl.Value == nil || len(*dl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (dl DiskList) hasNextLink() bool {
- return dl.NextLink != nil && len(*dl.NextLink) != 0
-}
-
-// diskListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) {
- if !dl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(dl.NextLink)))
-}
-
-// DiskListPage contains a page of Disk values.
-type DiskListPage struct {
- fn func(context.Context, DiskList) (DiskList, error)
- dl DiskList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.dl)
- if err != nil {
- return err
- }
- page.dl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DiskListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DiskListPage) NotDone() bool {
- return !page.dl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DiskListPage) Response() DiskList {
- return page.dl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DiskListPage) Values() []Disk {
- if page.dl.IsEmpty() {
- return nil
- }
- return *page.dl.Value
-}
-
-// Creates a new instance of the DiskListPage type.
-func NewDiskListPage(cur DiskList, getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage {
- return DiskListPage{
- fn: getNextPage,
- dl: cur,
- }
-}
-
-// DiskProperties disk resource properties.
-type DiskProperties struct {
- // TimeCreated - READ-ONLY; The time when the disk was created.
- TimeCreated *date.Time `json:"timeCreated,omitempty"`
- // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // PurchasePlan - Purchase plan information for the the image from which the OS disk was created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: WindowsServer}
- PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities for the image from which the OS disk was created.
- SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
- // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
- CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
- DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
- // UniqueID - READ-ONLY; Unique Guid identifying the resource.
- UniqueID *string `json:"uniqueId,omitempty"`
- // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
- EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
- // ProvisioningState - READ-ONLY; The disk provisioning state.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes.
- DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
- // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10.
- DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
- // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
- DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"`
- // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10.
- DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"`
- // DiskState - The state of the disk. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload'
- DiskState DiskState `json:"diskState,omitempty"`
- // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
- Encryption *Encryption `json:"encryption,omitempty"`
- // MaxShares - The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
- MaxShares *int32 `json:"maxShares,omitempty"`
- // ShareInfo - READ-ONLY; Details of the list of all VMs that have the disk attached. maxShares should be set to a value greater than one for disks to allow attaching them to multiple VMs.
- ShareInfo *[]ShareInfoElement `json:"shareInfo,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
- NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
- DiskAccessID *string `json:"diskAccessId,omitempty"`
- // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.
- Tier *string `json:"tier,omitempty"`
- // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks.
- BurstingEnabled *bool `json:"burstingEnabled,omitempty"`
- // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending.
- PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"`
- // SupportsHibernation - Indicates the OS on a disk supports hibernation.
- SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // SecurityProfile - Contains the security related information for the resource.
- SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"`
- // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
- CompletionPercent *float64 `json:"completionPercent,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
- PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskProperties.
-func (dp DiskProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dp.OsType != "" {
- objectMap["osType"] = dp.OsType
- }
- if dp.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = dp.HyperVGeneration
- }
- if dp.PurchasePlan != nil {
- objectMap["purchasePlan"] = dp.PurchasePlan
- }
- if dp.SupportedCapabilities != nil {
- objectMap["supportedCapabilities"] = dp.SupportedCapabilities
- }
- if dp.CreationData != nil {
- objectMap["creationData"] = dp.CreationData
- }
- if dp.DiskSizeGB != nil {
- objectMap["diskSizeGB"] = dp.DiskSizeGB
- }
- if dp.EncryptionSettingsCollection != nil {
- objectMap["encryptionSettingsCollection"] = dp.EncryptionSettingsCollection
- }
- if dp.DiskIOPSReadWrite != nil {
- objectMap["diskIOPSReadWrite"] = dp.DiskIOPSReadWrite
- }
- if dp.DiskMBpsReadWrite != nil {
- objectMap["diskMBpsReadWrite"] = dp.DiskMBpsReadWrite
- }
- if dp.DiskIOPSReadOnly != nil {
- objectMap["diskIOPSReadOnly"] = dp.DiskIOPSReadOnly
- }
- if dp.DiskMBpsReadOnly != nil {
- objectMap["diskMBpsReadOnly"] = dp.DiskMBpsReadOnly
- }
- if dp.DiskState != "" {
- objectMap["diskState"] = dp.DiskState
- }
- if dp.Encryption != nil {
- objectMap["encryption"] = dp.Encryption
- }
- if dp.MaxShares != nil {
- objectMap["maxShares"] = dp.MaxShares
- }
- if dp.NetworkAccessPolicy != "" {
- objectMap["networkAccessPolicy"] = dp.NetworkAccessPolicy
- }
- if dp.DiskAccessID != nil {
- objectMap["diskAccessId"] = dp.DiskAccessID
- }
- if dp.Tier != nil {
- objectMap["tier"] = dp.Tier
- }
- if dp.BurstingEnabled != nil {
- objectMap["burstingEnabled"] = dp.BurstingEnabled
- }
- if dp.SupportsHibernation != nil {
- objectMap["supportsHibernation"] = dp.SupportsHibernation
- }
- if dp.SecurityProfile != nil {
- objectMap["securityProfile"] = dp.SecurityProfile
- }
- if dp.CompletionPercent != nil {
- objectMap["completionPercent"] = dp.CompletionPercent
- }
- if dp.PublicNetworkAccess != "" {
- objectMap["publicNetworkAccess"] = dp.PublicNetworkAccess
- }
- return json.Marshal(objectMap)
-}
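
The point of the hand-written marshaler above is that every field marked READ-ONLY (TimeCreated, DiskSizeBytes, UniqueID, ProvisioningState, ShareInfo, PropertyUpdatesInProgress) is omitted, so a Disk fetched with GET can be sent straight back in a PUT/PATCH body without echoing server-owned values. A small sketch of that behaviour (to.Int32Ptr/to.StringPtr are the autorest "to" helpers; the printed JSON is the expected result under these assumptions, not captured output):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/Azure/go-autorest/autorest/to"
    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
    props := compute.DiskProperties{
        DiskSizeGB:        to.Int32Ptr(128),
        ProvisioningState: to.StringPtr("Succeeded"), // READ-ONLY: dropped by MarshalJSON
    }
    body, _ := json.Marshal(props)
    fmt.Println(string(body)) // {"diskSizeGB":128}
}
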
-
-// DiskRestorePoint properties of disk restore point
-type DiskRestorePoint struct {
- autorest.Response `json:"-"`
- *DiskRestorePointProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskRestorePoint.
-func (drp DiskRestorePoint) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if drp.DiskRestorePointProperties != nil {
- objectMap["properties"] = drp.DiskRestorePointProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiskRestorePoint struct.
-func (drp *DiskRestorePoint) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var diskRestorePointProperties DiskRestorePointProperties
- err = json.Unmarshal(*v, &diskRestorePointProperties)
- if err != nil {
- return err
- }
- drp.DiskRestorePointProperties = &diskRestorePointProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- drp.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- drp.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- drp.Type = &typeVar
- }
- }
- }
-
- return nil
-}
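
The custom unmarshaler above exists because the wire format nests the payload under a "properties" key while the Go type embeds *DiskRestorePointProperties, so its fields are promoted onto the wrapper. A self-contained sketch with a made-up payload (the resource IDs are placeholders):

package main

import (
    "encoding/json"
    "fmt"

    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
    raw := []byte(`{
        "name": "rp1",
        "properties": {"sourceResourceId": "/subscriptions/0000/resourceGroups/rg/providers/Microsoft.Compute/disks/d1"}
    }`)

    var drp compute.DiskRestorePoint
    if err := json.Unmarshal(raw, &drp); err != nil {
        panic(err)
    }
    // The "properties" object landed in the embedded struct, so its fields
    // are reachable directly on the wrapper.
    fmt.Println(*drp.SourceResourceID)
}
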
-
-// DiskRestorePointGrantAccessFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskRestorePointGrantAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskRestorePointClient) (AccessURI, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskRestorePointGrantAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskRestorePointGrantAccessFuture.Result.
-func (future *DiskRestorePointGrantAccessFuture) result(client DiskRestorePointClient) (au AccessURI, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- au.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointGrantAccessFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent {
- au, err = client.GrantAccessResponder(au.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskRestorePointList the List Disk Restore Points operation response.
-type DiskRestorePointList struct {
- autorest.Response `json:"-"`
- // Value - A list of disk restore points.
- Value *[]DiskRestorePoint `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of disk restore points. Call ListNext() with this to fetch the next page of disk restore points.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// DiskRestorePointListIterator provides access to a complete listing of DiskRestorePoint values.
-type DiskRestorePointListIterator struct {
- i int
- page DiskRestorePointListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *DiskRestorePointListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *DiskRestorePointListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter DiskRestorePointListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter DiskRestorePointListIterator) Response() DiskRestorePointList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter DiskRestorePointListIterator) Value() DiskRestorePoint {
- if !iter.page.NotDone() {
- return DiskRestorePoint{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the DiskRestorePointListIterator type.
-func NewDiskRestorePointListIterator(page DiskRestorePointListPage) DiskRestorePointListIterator {
- return DiskRestorePointListIterator{page: page}
-}
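
The iterator wraps a page and flattens it into single values; the loop protocol is NotDone()/Value()/NextWithContext(). A toy sketch that builds the iterator from a static page (the names and the no-op next-page function are made up; real code would get the first page and fetcher from a listing call):

package main

import (
    "context"
    "fmt"

    "github.com/Azure/go-autorest/autorest/to"
    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

func main() {
    rps := []compute.DiskRestorePoint{{Name: to.StringPtr("rp1")}, {Name: to.StringPtr("rp2")}}
    first := compute.DiskRestorePointList{Value: &rps}

    // A real next-page func would follow NextLink; returning an empty list
    // (no NextLink, no values) ends the enumeration after the first page.
    noMore := func(ctx context.Context, cur compute.DiskRestorePointList) (compute.DiskRestorePointList, error) {
        return compute.DiskRestorePointList{}, nil
    }

    iter := compute.NewDiskRestorePointListIterator(compute.NewDiskRestorePointListPage(first, noMore))
    for iter.NotDone() {
        fmt.Println(*iter.Value().Name)
        if err := iter.NextWithContext(context.Background()); err != nil {
            panic(err)
        }
    }
}
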
-
-// IsEmpty returns true if the ListResult contains no values.
-func (drpl DiskRestorePointList) IsEmpty() bool {
- return drpl.Value == nil || len(*drpl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (drpl DiskRestorePointList) hasNextLink() bool {
- return drpl.NextLink != nil && len(*drpl.NextLink) != 0
-}
-
-// diskRestorePointListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (drpl DiskRestorePointList) diskRestorePointListPreparer(ctx context.Context) (*http.Request, error) {
- if !drpl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(drpl.NextLink)))
-}
-
-// DiskRestorePointListPage contains a page of DiskRestorePoint values.
-type DiskRestorePointListPage struct {
- fn func(context.Context, DiskRestorePointList) (DiskRestorePointList, error)
- drpl DiskRestorePointList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *DiskRestorePointListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/DiskRestorePointListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.drpl)
- if err != nil {
- return err
- }
- page.drpl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *DiskRestorePointListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page DiskRestorePointListPage) NotDone() bool {
- return !page.drpl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page DiskRestorePointListPage) Response() DiskRestorePointList {
- return page.drpl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page DiskRestorePointListPage) Values() []DiskRestorePoint {
- if page.drpl.IsEmpty() {
- return nil
- }
- return *page.drpl.Value
-}
-
-// Creates a new instance of the DiskRestorePointListPage type.
-func NewDiskRestorePointListPage(cur DiskRestorePointList, getNextPage func(context.Context, DiskRestorePointList) (DiskRestorePointList, error)) DiskRestorePointListPage {
- return DiskRestorePointListPage{
- fn: getNextPage,
- drpl: cur,
- }
-}
-
-// DiskRestorePointProperties properties of an incremental disk restore point
-type DiskRestorePointProperties struct {
- // TimeCreated - READ-ONLY; The timestamp of restorePoint creation
- TimeCreated *date.Time `json:"timeCreated,omitempty"`
- // SourceResourceID - READ-ONLY; arm id of source disk
- SourceResourceID *string `json:"sourceResourceId,omitempty"`
- // OsType - READ-ONLY; The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // PurchasePlan - Purchase plan information for the image from which the OS disk was created.
- PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like accelerated networking) for the image from which the OS disk was created.
- SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
- // FamilyID - READ-ONLY; id of the backing snapshot's MIS family
- FamilyID *string `json:"familyId,omitempty"`
- // SourceUniqueID - READ-ONLY; unique incarnation id of the source disk
- SourceUniqueID *string `json:"sourceUniqueId,omitempty"`
- // Encryption - READ-ONLY; Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
- Encryption *Encryption `json:"encryption,omitempty"`
- // SupportsHibernation - Indicates the OS on a disk supports hibernation.
- SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
- NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
- PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
- // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
- DiskAccessID *string `json:"diskAccessId,omitempty"`
- // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
- CompletionPercent *float64 `json:"completionPercent,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskRestorePointProperties.
-func (drpp DiskRestorePointProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if drpp.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = drpp.HyperVGeneration
- }
- if drpp.PurchasePlan != nil {
- objectMap["purchasePlan"] = drpp.PurchasePlan
- }
- if drpp.SupportedCapabilities != nil {
- objectMap["supportedCapabilities"] = drpp.SupportedCapabilities
- }
- if drpp.SupportsHibernation != nil {
- objectMap["supportsHibernation"] = drpp.SupportsHibernation
- }
- if drpp.NetworkAccessPolicy != "" {
- objectMap["networkAccessPolicy"] = drpp.NetworkAccessPolicy
- }
- if drpp.PublicNetworkAccess != "" {
- objectMap["publicNetworkAccess"] = drpp.PublicNetworkAccess
- }
- if drpp.DiskAccessID != nil {
- objectMap["diskAccessId"] = drpp.DiskAccessID
- }
- if drpp.CompletionPercent != nil {
- objectMap["completionPercent"] = drpp.CompletionPercent
- }
- return json.Marshal(objectMap)
-}
-
-// DiskRestorePointRevokeAccessFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type DiskRestorePointRevokeAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DiskRestorePointClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DiskRestorePointRevokeAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DiskRestorePointRevokeAccessFuture.Result.
-func (future *DiskRestorePointRevokeAccessFuture) result(client DiskRestorePointClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DiskRestorePointRevokeAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DiskRestorePointRevokeAccessFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DisksCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DisksClient) (Disk, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DisksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DisksCreateOrUpdateFuture.Result.
-func (future *DisksCreateOrUpdateFuture) result(client DisksClient) (d Disk, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- d.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent {
- d, err = client.CreateOrUpdateResponder(d.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request")
- }
- }
- return
-}
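
All of the *Future types in this file implement the same long-running-operation protocol: the client call returns a future whose Result closure is wired up (here in UnmarshalJSON and in the generated sender), the caller polls with WaitForCompletionRef, and Result then decodes the final response. A minimal sketch assuming DisksClient.CreateOrUpdate keeps the usual generated signature (client construction and auth omitted):

package example

import (
    "context"

    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// createDisk starts the LRO, waits for it to finish, then decodes the Disk.
func createDisk(ctx context.Context, client compute.DisksClient, rg, name string, disk compute.Disk) (compute.Disk, error) {
    future, err := client.CreateOrUpdate(ctx, rg, name, disk)
    if err != nil {
        return compute.Disk{}, err
    }
    // Polls the Azure-AsyncOperation/Location endpoint until the operation is done.
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return compute.Disk{}, err
    }
    // Equivalent to the result() helper above: issues the final GET and runs
    // CreateOrUpdateResponder on it.
    return future.Result(client)
}
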
-
-// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
-type DisksDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DisksClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DisksDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DisksDeleteFuture.Result.
-func (future *DisksDeleteFuture) result(client DisksClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DiskSecurityProfile contains the security related information for the resource.
-type DiskSecurityProfile struct {
- // SecurityType - Possible values include: 'DiskSecurityTypesTrustedLaunch'
- SecurityType DiskSecurityTypes `json:"securityType,omitempty"`
-}
-
-// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DisksGrantAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DisksClient) (AccessURI, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DisksGrantAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DisksGrantAccessFuture.Result.
-func (future *DisksGrantAccessFuture) result(client DisksClient) (au AccessURI, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- au.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent {
- au, err = client.GrantAccessResponder(au.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS,
-// Premium_ZRS, or StandardSSD_ZRS.
-type DiskSku struct {
- // Name - The sku name. Possible values include: 'DiskStorageAccountTypesStandardLRS', 'DiskStorageAccountTypesPremiumLRS', 'DiskStorageAccountTypesStandardSSDLRS', 'DiskStorageAccountTypesUltraSSDLRS', 'DiskStorageAccountTypesPremiumZRS', 'DiskStorageAccountTypesStandardSSDZRS'
- Name DiskStorageAccountTypes `json:"name,omitempty"`
- // Tier - READ-ONLY; The sku tier.
- Tier *string `json:"tier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskSku.
-func (ds DiskSku) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ds.Name != "" {
- objectMap["name"] = ds.Name
- }
- return json.Marshal(objectMap)
-}
-
-// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type DisksRevokeAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DisksClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DisksRevokeAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DisksRevokeAccessFuture.Result.
-func (future *DisksRevokeAccessFuture) result(client DisksClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
-type DisksUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(DisksClient) (Disk, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *DisksUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for DisksUpdateFuture.Result.
-func (future *DisksUpdateFuture) result(client DisksClient) (d Disk, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- d.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent {
- d, err = client.UpdateResponder(d.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// DiskUpdate disk update resource.
-type DiskUpdate struct {
- *DiskUpdateProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
- Sku *DiskSku `json:"sku,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskUpdate.
-func (du DiskUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if du.DiskUpdateProperties != nil {
- objectMap["properties"] = du.DiskUpdateProperties
- }
- if du.Tags != nil {
- objectMap["tags"] = du.Tags
- }
- if du.Sku != nil {
- objectMap["sku"] = du.Sku
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct.
-func (du *DiskUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var diskUpdateProperties DiskUpdateProperties
- err = json.Unmarshal(*v, &diskUpdateProperties)
- if err != nil {
- return err
- }
- du.DiskUpdateProperties = &diskUpdateProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- du.Tags = tags
- }
- case "sku":
- if v != nil {
- var sku DiskSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- du.Sku = &sku
- }
- }
- }
-
- return nil
-}
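
DiskUpdate is the PATCH-shaped counterpart of Disk: only the pointer fields you populate are serialized (see the marshalers above), so a resize or retag sends a minimal body. A sketch assuming DisksClient.Update with the usual generated signature (resource names are placeholders):

package example

import (
    "context"

    "github.com/Azure/go-autorest/autorest/to"
    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// resizeDisk grows a detached managed disk to 256 GiB and retags it.
func resizeDisk(ctx context.Context, client compute.DisksClient, rg, diskName string) error {
    update := compute.DiskUpdate{
        DiskUpdateProperties: &compute.DiskUpdateProperties{
            // Resizes are only allowed while the disk is not attached to a running VM.
            DiskSizeGB: to.Int32Ptr(256),
        },
        Tags: map[string]*string{"team": to.StringPtr("platform")},
    }
    future, err := client.Update(ctx, rg, diskName, update)
    if err != nil {
        return err
    }
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return err
    }
    _, err = future.Result(client)
    return err
}
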
-
-// DiskUpdateProperties disk resource update properties.
-type DiskUpdateProperties struct {
- // OsType - the Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // EncryptionSettingsCollection - Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
- EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
- // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes.
- DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
- // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10.
- DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
- // DiskIOPSReadOnly - The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
- DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"`
- // DiskMBpsReadOnly - The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10.
- DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"`
- // MaxShares - The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
- MaxShares *int32 `json:"maxShares,omitempty"`
- // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
- Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
- NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
- DiskAccessID *string `json:"diskAccessId,omitempty"`
- // Tier - Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra disks.
- Tier *string `json:"tier,omitempty"`
- // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. Does not apply to Ultra disks.
- BurstingEnabled *bool `json:"burstingEnabled,omitempty"`
- // PurchasePlan - Purchase plan information to be added on the OS disk
- PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like accelerated networking) to be added on the OS disk.
- SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
- // PropertyUpdatesInProgress - READ-ONLY; Properties of the disk for which update is pending.
- PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty"`
- // SupportsHibernation - Indicates the OS on a disk supports hibernation.
- SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
- PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiskUpdateProperties.
-func (dup DiskUpdateProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dup.OsType != "" {
- objectMap["osType"] = dup.OsType
- }
- if dup.DiskSizeGB != nil {
- objectMap["diskSizeGB"] = dup.DiskSizeGB
- }
- if dup.EncryptionSettingsCollection != nil {
- objectMap["encryptionSettingsCollection"] = dup.EncryptionSettingsCollection
- }
- if dup.DiskIOPSReadWrite != nil {
- objectMap["diskIOPSReadWrite"] = dup.DiskIOPSReadWrite
- }
- if dup.DiskMBpsReadWrite != nil {
- objectMap["diskMBpsReadWrite"] = dup.DiskMBpsReadWrite
- }
- if dup.DiskIOPSReadOnly != nil {
- objectMap["diskIOPSReadOnly"] = dup.DiskIOPSReadOnly
- }
- if dup.DiskMBpsReadOnly != nil {
- objectMap["diskMBpsReadOnly"] = dup.DiskMBpsReadOnly
- }
- if dup.MaxShares != nil {
- objectMap["maxShares"] = dup.MaxShares
- }
- if dup.Encryption != nil {
- objectMap["encryption"] = dup.Encryption
- }
- if dup.NetworkAccessPolicy != "" {
- objectMap["networkAccessPolicy"] = dup.NetworkAccessPolicy
- }
- if dup.DiskAccessID != nil {
- objectMap["diskAccessId"] = dup.DiskAccessID
- }
- if dup.Tier != nil {
- objectMap["tier"] = dup.Tier
- }
- if dup.BurstingEnabled != nil {
- objectMap["burstingEnabled"] = dup.BurstingEnabled
- }
- if dup.PurchasePlan != nil {
- objectMap["purchasePlan"] = dup.PurchasePlan
- }
- if dup.SupportedCapabilities != nil {
- objectMap["supportedCapabilities"] = dup.SupportedCapabilities
- }
- if dup.SupportsHibernation != nil {
- objectMap["supportsHibernation"] = dup.SupportsHibernation
- }
- if dup.PublicNetworkAccess != "" {
- objectMap["publicNetworkAccess"] = dup.PublicNetworkAccess
- }
- return json.Marshal(objectMap)
-}
-
-// Encryption encryption at rest settings for disk or snapshot
-type Encryption struct {
- // DiskEncryptionSetID - ResourceId of the disk encryption set to use for enabling encryption at rest.
- DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"`
- // Type - Possible values include: 'EncryptionTypeEncryptionAtRestWithPlatformKey', 'EncryptionTypeEncryptionAtRestWithCustomerKey', 'EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys'
- Type EncryptionType `json:"type,omitempty"`
-}
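
Encryption is how a disk, snapshot, or restore point is pointed at a customer-managed key: set DiskEncryptionSetID to the disk encryption set's ARM ID and pick the matching EncryptionType. A fragment-sized sketch (the enum constant name follows the "Possible values include" list above; the resource ID is a placeholder):

package example

import (
    "github.com/Azure/go-autorest/autorest/to"
    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// customerManagedKey can be assigned to DiskProperties.Encryption or
// DiskUpdateProperties.Encryption.
var customerManagedKey = &compute.Encryption{
    DiskEncryptionSetID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/diskEncryptionSets/<des>"),
    Type:                compute.EncryptionTypeEncryptionAtRestWithCustomerKey,
}
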
-
-// EncryptionImages optional. Allows users to provide customer managed keys for encrypting the OS and data
-// disks in the gallery artifact.
-type EncryptionImages struct {
- OsDiskImage *OSDiskImageEncryption `json:"osDiskImage,omitempty"`
- // DataDiskImages - A list of encryption specifications for data disk images.
- DataDiskImages *[]DataDiskImageEncryption `json:"dataDiskImages,omitempty"`
-}
-
-// EncryptionSetIdentity the managed identity for the disk encryption set. It should be given permission on
-// the key vault before it can be used to encrypt disks.
-type EncryptionSetIdentity struct {
- // Type - The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. Possible values include: 'DiskEncryptionSetIdentityTypeSystemAssigned', 'DiskEncryptionSetIdentityTypeNone'
- Type DiskEncryptionSetIdentityType `json:"type,omitempty"`
- // PrincipalID - READ-ONLY; The object id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a systemAssigned(implicit) identity
- PrincipalID *string `json:"principalId,omitempty"`
- // TenantID - READ-ONLY; The tenant id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a systemAssigned(implicit) identity
- TenantID *string `json:"tenantId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for EncryptionSetIdentity.
-func (esi EncryptionSetIdentity) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if esi.Type != "" {
- objectMap["type"] = esi.Type
- }
- return json.Marshal(objectMap)
-}
-
-// EncryptionSetProperties ...
-type EncryptionSetProperties struct {
- // EncryptionType - Possible values include: 'DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey', 'DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys'
- EncryptionType DiskEncryptionSetType `json:"encryptionType,omitempty"`
- // ActiveKey - The key vault key which is currently used by this disk encryption set.
- ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"`
- // PreviousKeys - READ-ONLY; A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is in progress. It will be empty if there is no ongoing key rotation.
- PreviousKeys *[]KeyForDiskEncryptionSet `json:"previousKeys,omitempty"`
- // ProvisioningState - READ-ONLY; The disk encryption set provisioning state.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // RotationToLatestKeyVersionEnabled - Set this flag to true to enable auto-updating of this disk encryption set to the latest key version.
- RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"`
- // LastKeyRotationTimestamp - READ-ONLY; The time when the active key of this disk encryption set was updated.
- LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"`
- // AutoKeyRotationError - READ-ONLY; The error that was encountered during auto-key rotation. If an error is present, then auto-key rotation will not be attempted until the error on this disk encryption set is fixed.
- AutoKeyRotationError *APIError `json:"autoKeyRotationError,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for EncryptionSetProperties.
-func (esp EncryptionSetProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if esp.EncryptionType != "" {
- objectMap["encryptionType"] = esp.EncryptionType
- }
- if esp.ActiveKey != nil {
- objectMap["activeKey"] = esp.ActiveKey
- }
- if esp.RotationToLatestKeyVersionEnabled != nil {
- objectMap["rotationToLatestKeyVersionEnabled"] = esp.RotationToLatestKeyVersionEnabled
- }
- return json.Marshal(objectMap)
-}
-
-// EncryptionSettingsCollection encryption settings for disk or snapshot
-type EncryptionSettingsCollection struct {
- // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
- Enabled *bool `json:"enabled,omitempty"`
- // EncryptionSettings - A collection of encryption settings, one for each disk volume.
- EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"`
- // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption.
- EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"`
-}
-
-// EncryptionSettingsElement encryption settings for one disk volume.
-type EncryptionSettingsElement struct {
- // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key
- DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"`
- // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
- KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"`
-}
-
-// ExtendedLocation the complex type of the extended location.
-type ExtendedLocation struct {
- // Name - The name of the extended location.
- Name *string `json:"name,omitempty"`
- // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone'
- Type ExtendedLocationTypes `json:"type,omitempty"`
-}
-
-// Extension describes a cloud service Extension.
-type Extension struct {
- // Name - The name of the extension.
- Name *string `json:"name,omitempty"`
- Properties *CloudServiceExtensionProperties `json:"properties,omitempty"`
-}
-
-// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleriesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleriesClient) (Gallery, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleriesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleriesCreateOrUpdateFuture.Result.
-func (future *GalleriesCreateOrUpdateFuture) result(client GalleriesClient) (g Gallery, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- g.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent {
- g, err = client.CreateOrUpdateResponder(g.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleriesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleriesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleriesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleriesDeleteFuture.Result.
-func (future *GalleriesDeleteFuture) result(client GalleriesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// GalleriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleriesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleriesClient) (Gallery, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleriesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleriesUpdateFuture.Result.
-func (future *GalleriesUpdateFuture) result(client GalleriesClient) (g Gallery, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- g.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent {
- g, err = client.UpdateResponder(g.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// Gallery specifies information about the Shared Image Gallery that you want to create or update.
-type Gallery struct {
- autorest.Response `json:"-"`
- *GalleryProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for Gallery.
-func (g Gallery) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if g.GalleryProperties != nil {
- objectMap["properties"] = g.GalleryProperties
- }
- if g.Location != nil {
- objectMap["location"] = g.Location
- }
- if g.Tags != nil {
- objectMap["tags"] = g.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for Gallery struct.
-func (g *Gallery) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryProperties GalleryProperties
- err = json.Unmarshal(*v, &galleryProperties)
- if err != nil {
- return err
- }
- g.GalleryProperties = &galleryProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- g.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- g.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- g.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- g.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- g.Tags = tags
- }
- }
- }
-
- return nil
-}
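
Gallery is the top-level Shared Image Gallery resource; like the disk types it wraps a flattened *GalleryProperties plus the ARM envelope (ID, Name, Type, Location, Tags). A creation sketch assuming GalleriesClient.CreateOrUpdate with the usual generated signature and a Description field on GalleryProperties (neither is shown in this hunk):

package example

import (
    "context"

    "github.com/Azure/go-autorest/autorest/to"
    // Import path and API version are assumed for illustration.
    compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// createGallery provisions a Shared Image Gallery and waits for the LRO.
func createGallery(ctx context.Context, client compute.GalleriesClient, rg string) (compute.Gallery, error) {
    gallery := compute.Gallery{
        Location: to.StringPtr("westeurope"),
        GalleryProperties: &compute.GalleryProperties{
            Description: to.StringPtr("images for autoscaled node pools"),
        },
    }
    future, err := client.CreateOrUpdate(ctx, rg, "myGallery", gallery)
    if err != nil {
        return compute.Gallery{}, err
    }
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return compute.Gallery{}, err
    }
    return future.Result(client)
}
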
-
-// GalleryApplication specifies information about the gallery Application Definition that you want to
-// create or update.
-type GalleryApplication struct {
- autorest.Response `json:"-"`
- *GalleryApplicationProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplication.
-func (ga GalleryApplication) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ga.GalleryApplicationProperties != nil {
- objectMap["properties"] = ga.GalleryApplicationProperties
- }
- if ga.Location != nil {
- objectMap["location"] = ga.Location
- }
- if ga.Tags != nil {
- objectMap["tags"] = ga.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryApplication struct.
-func (ga *GalleryApplication) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryApplicationProperties GalleryApplicationProperties
- err = json.Unmarshal(*v, &galleryApplicationProperties)
- if err != nil {
- return err
- }
- ga.GalleryApplicationProperties = &galleryApplicationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- ga.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- ga.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- ga.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- ga.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- ga.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryApplicationList the List Gallery Applications operation response.
-type GalleryApplicationList struct {
- autorest.Response `json:"-"`
- // Value - A list of Gallery Applications.
- Value *[]GalleryApplication `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch the next page of gallery Application Definitions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// GalleryApplicationListIterator provides access to a complete listing of GalleryApplication values.
-type GalleryApplicationListIterator struct {
- i int
- page GalleryApplicationListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *GalleryApplicationListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter GalleryApplicationListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter GalleryApplicationListIterator) Response() GalleryApplicationList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter GalleryApplicationListIterator) Value() GalleryApplication {
- if !iter.page.NotDone() {
- return GalleryApplication{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the GalleryApplicationListIterator type.
-func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator {
- return GalleryApplicationListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (gal GalleryApplicationList) IsEmpty() bool {
- return gal.Value == nil || len(*gal.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (gal GalleryApplicationList) hasNextLink() bool {
- return gal.NextLink != nil && len(*gal.NextLink) != 0
-}
-
-// galleryApplicationListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (gal GalleryApplicationList) galleryApplicationListPreparer(ctx context.Context) (*http.Request, error) {
- if !gal.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(gal.NextLink)))
-}
-
-// GalleryApplicationListPage contains a page of GalleryApplication values.
-type GalleryApplicationListPage struct {
- fn func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)
- gal GalleryApplicationList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.gal)
- if err != nil {
- return err
- }
- page.gal = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *GalleryApplicationListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page GalleryApplicationListPage) NotDone() bool {
- return !page.gal.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page GalleryApplicationListPage) Response() GalleryApplicationList {
- return page.gal
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page GalleryApplicationListPage) Values() []GalleryApplication {
- if page.gal.IsEmpty() {
- return nil
- }
- return *page.gal.Value
-}
-
-// Creates a new instance of the GalleryApplicationListPage type.
-func NewGalleryApplicationListPage(cur GalleryApplicationList, getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage {
- return GalleryApplicationListPage{
- fn: getNextPage,
- gal: cur,
- }
-}
-
-// GalleryApplicationProperties describes the properties of a gallery Application Definition.
-type GalleryApplicationProperties struct {
- // Description - The description of this gallery Application Definition resource. This property is updatable.
- Description *string `json:"description,omitempty"`
- // Eula - The Eula agreement for the gallery Application Definition.
- Eula *string `json:"eula,omitempty"`
- // PrivacyStatementURI - The privacy statement uri.
- PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"`
- // ReleaseNoteURI - The release note uri.
- ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- // SupportedOSType - This property allows you to specify the supported type of the OS that application is built for. Possible values are: **Windows**, **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- SupportedOSType OperatingSystemTypes `json:"supportedOSType,omitempty"`
-}
-
-// GalleryApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryApplicationsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationsClient) (GalleryApplication, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationsCreateOrUpdateFuture.Result.
-func (future *GalleryApplicationsCreateOrUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ga.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent {
- ga, err = client.CreateOrUpdateResponder(ga.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryApplicationsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationsDeleteFuture.Result.
-func (future *GalleryApplicationsDeleteFuture) result(client GalleryApplicationsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// GalleryApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryApplicationsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationsClient) (GalleryApplication, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationsUpdateFuture.Result.
-func (future *GalleryApplicationsUpdateFuture) result(client GalleryApplicationsClient) (ga GalleryApplication, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ga.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent {
- ga, err = client.UpdateResponder(ga.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to
-// update.
-type GalleryApplicationUpdate struct {
- *GalleryApplicationProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplicationUpdate.
-func (gau GalleryApplicationUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gau.GalleryApplicationProperties != nil {
- objectMap["properties"] = gau.GalleryApplicationProperties
- }
- if gau.Tags != nil {
- objectMap["tags"] = gau.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryApplicationUpdate struct.
-func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryApplicationProperties GalleryApplicationProperties
- err = json.Unmarshal(*v, &galleryApplicationProperties)
- if err != nil {
- return err
- }
- gau.GalleryApplicationProperties = &galleryApplicationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- gau.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- gau.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- gau.Type = &typeVar
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- gau.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryApplicationVersion specifies information about the gallery Application Version that you want to
-// create or update.
-type GalleryApplicationVersion struct {
- autorest.Response `json:"-"`
- *GalleryApplicationVersionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplicationVersion.
-func (gav GalleryApplicationVersion) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gav.GalleryApplicationVersionProperties != nil {
- objectMap["properties"] = gav.GalleryApplicationVersionProperties
- }
- if gav.Location != nil {
- objectMap["location"] = gav.Location
- }
- if gav.Tags != nil {
- objectMap["tags"] = gav.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersion struct.
-func (gav *GalleryApplicationVersion) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryApplicationVersionProperties GalleryApplicationVersionProperties
- err = json.Unmarshal(*v, &galleryApplicationVersionProperties)
- if err != nil {
- return err
- }
- gav.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- gav.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- gav.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- gav.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- gav.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- gav.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryApplicationVersionList the List Gallery Application version operation response.
-type GalleryApplicationVersionList struct {
- autorest.Response `json:"-"`
- // Value - A list of gallery Application Versions.
- Value *[]GalleryApplicationVersion `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery Application Versions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// GalleryApplicationVersionListIterator provides access to a complete listing of GalleryApplicationVersion
-// values.
-type GalleryApplicationVersionListIterator struct {
- i int
- page GalleryApplicationVersionListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *GalleryApplicationVersionListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter GalleryApplicationVersionListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter GalleryApplicationVersionListIterator) Response() GalleryApplicationVersionList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter GalleryApplicationVersionListIterator) Value() GalleryApplicationVersion {
- if !iter.page.NotDone() {
- return GalleryApplicationVersion{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the GalleryApplicationVersionListIterator type.
-func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator {
- return GalleryApplicationVersionListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (gavl GalleryApplicationVersionList) IsEmpty() bool {
- return gavl.Value == nil || len(*gavl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (gavl GalleryApplicationVersionList) hasNextLink() bool {
- return gavl.NextLink != nil && len(*gavl.NextLink) != 0
-}
-
-// galleryApplicationVersionListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (gavl GalleryApplicationVersionList) galleryApplicationVersionListPreparer(ctx context.Context) (*http.Request, error) {
- if !gavl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(gavl.NextLink)))
-}
-
-// GalleryApplicationVersionListPage contains a page of GalleryApplicationVersion values.
-type GalleryApplicationVersionListPage struct {
- fn func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)
- gavl GalleryApplicationVersionList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.gavl)
- if err != nil {
- return err
- }
- page.gavl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *GalleryApplicationVersionListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page GalleryApplicationVersionListPage) NotDone() bool {
- return !page.gavl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page GalleryApplicationVersionListPage) Response() GalleryApplicationVersionList {
- return page.gavl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page GalleryApplicationVersionListPage) Values() []GalleryApplicationVersion {
- if page.gavl.IsEmpty() {
- return nil
- }
- return *page.gavl.Value
-}
-
-// Creates a new instance of the GalleryApplicationVersionListPage type.
-func NewGalleryApplicationVersionListPage(cur GalleryApplicationVersionList, getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage {
- return GalleryApplicationVersionListPage{
- fn: getNextPage,
- gavl: cur,
- }
-}
-
-// GalleryApplicationVersionProperties describes the properties of a gallery image version.
-type GalleryApplicationVersionProperties struct {
- PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating'
- ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"`
- // ReplicationStatus - READ-ONLY
- ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplicationVersionProperties.
-func (gavp GalleryApplicationVersionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gavp.PublishingProfile != nil {
- objectMap["publishingProfile"] = gavp.PublishingProfile
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryApplicationVersionPublishingProfile the publishing profile of a gallery image version.
-type GalleryApplicationVersionPublishingProfile struct {
- Source *UserArtifactSource `json:"source,omitempty"`
- ManageActions *UserArtifactManage `json:"manageActions,omitempty"`
- // EnableHealthCheck - Optional. Whether or not this application reports health.
- EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"`
- // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
- TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
- // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
- ReplicaCount *int32 `json:"replicaCount,omitempty"`
- // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
- ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"`
- // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
- StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
- ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplicationVersionPublishingProfile.
-func (gavpp GalleryApplicationVersionPublishingProfile) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gavpp.Source != nil {
- objectMap["source"] = gavpp.Source
- }
- if gavpp.ManageActions != nil {
- objectMap["manageActions"] = gavpp.ManageActions
- }
- if gavpp.EnableHealthCheck != nil {
- objectMap["enableHealthCheck"] = gavpp.EnableHealthCheck
- }
- if gavpp.TargetRegions != nil {
- objectMap["targetRegions"] = gavpp.TargetRegions
- }
- if gavpp.ReplicaCount != nil {
- objectMap["replicaCount"] = gavpp.ReplicaCount
- }
- if gavpp.ExcludeFromLatest != nil {
- objectMap["excludeFromLatest"] = gavpp.ExcludeFromLatest
- }
- if gavpp.EndOfLifeDate != nil {
- objectMap["endOfLifeDate"] = gavpp.EndOfLifeDate
- }
- if gavpp.StorageAccountType != "" {
- objectMap["storageAccountType"] = gavpp.StorageAccountType
- }
- if gavpp.ReplicationMode != "" {
- objectMap["replicationMode"] = gavpp.ReplicationMode
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryApplicationVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type GalleryApplicationVersionsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationVersionsCreateOrUpdateFuture.Result.
-func (future *GalleryApplicationVersionsCreateOrUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- gav.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent {
- gav, err = client.CreateOrUpdateResponder(gav.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryApplicationVersionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationVersionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationVersionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationVersionsDeleteFuture.Result.
-func (future *GalleryApplicationVersionsDeleteFuture) result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// GalleryApplicationVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryApplicationVersionsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryApplicationVersionsClient) (GalleryApplicationVersion, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryApplicationVersionsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryApplicationVersionsUpdateFuture.Result.
-func (future *GalleryApplicationVersionsUpdateFuture) result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- gav.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent {
- gav, err = client.UpdateResponder(gav.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryApplicationVersionUpdate specifies information about the gallery Application Version that you
-// want to update.
-type GalleryApplicationVersionUpdate struct {
- *GalleryApplicationVersionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryApplicationVersionUpdate.
-func (gavu GalleryApplicationVersionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gavu.GalleryApplicationVersionProperties != nil {
- objectMap["properties"] = gavu.GalleryApplicationVersionProperties
- }
- if gavu.Tags != nil {
- objectMap["tags"] = gavu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersionUpdate struct.
-func (gavu *GalleryApplicationVersionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryApplicationVersionProperties GalleryApplicationVersionProperties
- err = json.Unmarshal(*v, &galleryApplicationVersionProperties)
- if err != nil {
- return err
- }
- gavu.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- gavu.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- gavu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- gavu.Type = &typeVar
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- gavu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile.
-type GalleryArtifactPublishingProfileBase struct {
- // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
- TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
- // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
- ReplicaCount *int32 `json:"replicaCount,omitempty"`
- // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
- ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"`
- // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
- StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
- ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryArtifactPublishingProfileBase.
-func (gappb GalleryArtifactPublishingProfileBase) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gappb.TargetRegions != nil {
- objectMap["targetRegions"] = gappb.TargetRegions
- }
- if gappb.ReplicaCount != nil {
- objectMap["replicaCount"] = gappb.ReplicaCount
- }
- if gappb.ExcludeFromLatest != nil {
- objectMap["excludeFromLatest"] = gappb.ExcludeFromLatest
- }
- if gappb.EndOfLifeDate != nil {
- objectMap["endOfLifeDate"] = gappb.EndOfLifeDate
- }
- if gappb.StorageAccountType != "" {
- objectMap["storageAccountType"] = gappb.StorageAccountType
- }
- if gappb.ReplicationMode != "" {
- objectMap["replicationMode"] = gappb.ReplicationMode
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryArtifactSource the source image from which the Image Version is going to be created.
-type GalleryArtifactSource struct {
- ManagedImage *ManagedArtifact `json:"managedImage,omitempty"`
-}
-
-// GalleryArtifactVersionSource the gallery artifact version source.
-type GalleryArtifactVersionSource struct {
- // ID - The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
- ID *string `json:"id,omitempty"`
- // URI - The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
- URI *string `json:"uri,omitempty"`
-}
-
-// GalleryDataDiskImage this is the data disk image.
-type GalleryDataDiskImage struct {
- // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine.
- Lun *int32 `json:"lun,omitempty"`
- // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created.
- SizeInGB *int32 `json:"sizeInGB,omitempty"`
- // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite'
- HostCaching HostCaching `json:"hostCaching,omitempty"`
- Source *GalleryArtifactVersionSource `json:"source,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryDataDiskImage.
-func (gddi GalleryDataDiskImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gddi.Lun != nil {
- objectMap["lun"] = gddi.Lun
- }
- if gddi.HostCaching != "" {
- objectMap["hostCaching"] = gddi.HostCaching
- }
- if gddi.Source != nil {
- objectMap["source"] = gddi.Source
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryDiskImage this is the disk image base class.
-type GalleryDiskImage struct {
- // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created.
- SizeInGB *int32 `json:"sizeInGB,omitempty"`
- // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite'
- HostCaching HostCaching `json:"hostCaching,omitempty"`
- Source *GalleryArtifactVersionSource `json:"source,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryDiskImage.
-func (gdi GalleryDiskImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gdi.HostCaching != "" {
- objectMap["hostCaching"] = gdi.HostCaching
- }
- if gdi.Source != nil {
- objectMap["source"] = gdi.Source
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryIdentifier describes the gallery unique name.
-type GalleryIdentifier struct {
- // UniqueName - READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
- UniqueName *string `json:"uniqueName,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryIdentifier.
-func (gi GalleryIdentifier) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// GalleryImage specifies information about the gallery image definition that you want to create or update.
-type GalleryImage struct {
- autorest.Response `json:"-"`
- *GalleryImageProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImage.
-func (gi GalleryImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gi.GalleryImageProperties != nil {
- objectMap["properties"] = gi.GalleryImageProperties
- }
- if gi.Location != nil {
- objectMap["location"] = gi.Location
- }
- if gi.Tags != nil {
- objectMap["tags"] = gi.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryImage struct.
-func (gi *GalleryImage) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryImageProperties GalleryImageProperties
- err = json.Unmarshal(*v, &galleryImageProperties)
- if err != nil {
- return err
- }
- gi.GalleryImageProperties = &galleryImageProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- gi.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- gi.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- gi.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- gi.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- gi.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryImageFeature a feature for gallery image.
-type GalleryImageFeature struct {
- // Name - The name of the gallery image feature.
- Name *string `json:"name,omitempty"`
- // Value - The value of the gallery image feature.
- Value *string `json:"value,omitempty"`
-}
-
-// GalleryImageIdentifier this is the gallery image definition identifier.
-type GalleryImageIdentifier struct {
- // Publisher - The name of the gallery image definition publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Offer - The name of the gallery image definition offer.
- Offer *string `json:"offer,omitempty"`
- // Sku - The name of the gallery image definition SKU.
- Sku *string `json:"sku,omitempty"`
-}
-
-// GalleryImageList the List Gallery Images operation response.
-type GalleryImageList struct {
- autorest.Response `json:"-"`
- // Value - A list of Shared Image Gallery images.
- Value *[]GalleryImage `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Image Definitions in the Shared Image Gallery. Call ListNext() with this to fetch the next page of gallery image definitions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// GalleryImageListIterator provides access to a complete listing of GalleryImage values.
-type GalleryImageListIterator struct {
- i int
- page GalleryImageListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *GalleryImageListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *GalleryImageListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter GalleryImageListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter GalleryImageListIterator) Response() GalleryImageList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter GalleryImageListIterator) Value() GalleryImage {
- if !iter.page.NotDone() {
- return GalleryImage{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the GalleryImageListIterator type.
-func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator {
- return GalleryImageListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (gil GalleryImageList) IsEmpty() bool {
- return gil.Value == nil || len(*gil.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (gil GalleryImageList) hasNextLink() bool {
- return gil.NextLink != nil && len(*gil.NextLink) != 0
-}
-
-// galleryImageListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (gil GalleryImageList) galleryImageListPreparer(ctx context.Context) (*http.Request, error) {
- if !gil.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(gil.NextLink)))
-}
-
-// GalleryImageListPage contains a page of GalleryImage values.
-type GalleryImageListPage struct {
- fn func(context.Context, GalleryImageList) (GalleryImageList, error)
- gil GalleryImageList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *GalleryImageListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.gil)
- if err != nil {
- return err
- }
- page.gil = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *GalleryImageListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page GalleryImageListPage) NotDone() bool {
- return !page.gil.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page GalleryImageListPage) Response() GalleryImageList {
- return page.gil
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page GalleryImageListPage) Values() []GalleryImage {
- if page.gil.IsEmpty() {
- return nil
- }
- return *page.gil.Value
-}
-
-// Creates a new instance of the GalleryImageListPage type.
-func NewGalleryImageListPage(cur GalleryImageList, getNextPage func(context.Context, GalleryImageList) (GalleryImageList, error)) GalleryImageListPage {
- return GalleryImageListPage{
- fn: getNextPage,
- gil: cur,
- }
-}
-
-// GalleryImageProperties describes the properties of a gallery image definition.
-type GalleryImageProperties struct {
- // Description - The description of this gallery image definition resource. This property is updatable.
- Description *string `json:"description,omitempty"`
- // Eula - The Eula agreement for the gallery image definition.
- Eula *string `json:"eula,omitempty"`
- // PrivacyStatementURI - The privacy statement uri.
- PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"`
- // ReleaseNoteURI - The release note uri.
- ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"`
- // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. Possible values are: **Windows**, **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
- OsState OperatingSystemStateTypes `json:"osState,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- Identifier *GalleryImageIdentifier `json:"identifier,omitempty"`
- Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
- Disallowed *Disallowed `json:"disallowed,omitempty"`
- PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating'
- ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"`
- // Features - A list of gallery image features.
- Features *[]GalleryImageFeature `json:"features,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageProperties.
-func (gip GalleryImageProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gip.Description != nil {
- objectMap["description"] = gip.Description
- }
- if gip.Eula != nil {
- objectMap["eula"] = gip.Eula
- }
- if gip.PrivacyStatementURI != nil {
- objectMap["privacyStatementUri"] = gip.PrivacyStatementURI
- }
- if gip.ReleaseNoteURI != nil {
- objectMap["releaseNoteUri"] = gip.ReleaseNoteURI
- }
- if gip.OsType != "" {
- objectMap["osType"] = gip.OsType
- }
- if gip.OsState != "" {
- objectMap["osState"] = gip.OsState
- }
- if gip.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = gip.HyperVGeneration
- }
- if gip.EndOfLifeDate != nil {
- objectMap["endOfLifeDate"] = gip.EndOfLifeDate
- }
- if gip.Identifier != nil {
- objectMap["identifier"] = gip.Identifier
- }
- if gip.Recommended != nil {
- objectMap["recommended"] = gip.Recommended
- }
- if gip.Disallowed != nil {
- objectMap["disallowed"] = gip.Disallowed
- }
- if gip.PurchasePlan != nil {
- objectMap["purchasePlan"] = gip.PurchasePlan
- }
- if gip.Features != nil {
- objectMap["features"] = gip.Features
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryImagesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImagesClient) (GalleryImage, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImagesCreateOrUpdateFuture.Result.
-func (future *GalleryImagesCreateOrUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- gi.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent {
- gi, err = client.CreateOrUpdateResponder(gi.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleryImagesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImagesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImagesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImagesDeleteFuture.Result.
-func (future *GalleryImagesDeleteFuture) result(client GalleryImagesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// GalleryImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleryImagesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImagesClient) (GalleryImage, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImagesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImagesUpdateFuture.Result.
-func (future *GalleryImagesUpdateFuture) result(client GalleryImagesClient) (gi GalleryImage, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- gi.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent {
- gi, err = client.UpdateResponder(gi.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryImageUpdate specifies information about the gallery image definition that you want to update.
-type GalleryImageUpdate struct {
- *GalleryImageProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageUpdate.
-func (giu GalleryImageUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if giu.GalleryImageProperties != nil {
- objectMap["properties"] = giu.GalleryImageProperties
- }
- if giu.Tags != nil {
- objectMap["tags"] = giu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryImageUpdate struct.
-func (giu *GalleryImageUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryImageProperties GalleryImageProperties
- err = json.Unmarshal(*v, &galleryImageProperties)
- if err != nil {
- return err
- }
- giu.GalleryImageProperties = &galleryImageProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- giu.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- giu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- giu.Type = &typeVar
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- giu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryImageVersion specifies information about the gallery image version that you want to create or
-// update.
-type GalleryImageVersion struct {
- autorest.Response `json:"-"`
- *GalleryImageVersionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageVersion.
-func (giv GalleryImageVersion) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if giv.GalleryImageVersionProperties != nil {
- objectMap["properties"] = giv.GalleryImageVersionProperties
- }
- if giv.Location != nil {
- objectMap["location"] = giv.Location
- }
- if giv.Tags != nil {
- objectMap["tags"] = giv.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryImageVersion struct.
-func (giv *GalleryImageVersion) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryImageVersionProperties GalleryImageVersionProperties
- err = json.Unmarshal(*v, &galleryImageVersionProperties)
- if err != nil {
- return err
- }
- giv.GalleryImageVersionProperties = &galleryImageVersionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- giv.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- giv.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- giv.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- giv.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- giv.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryImageVersionList the List Gallery Image version operation response.
-type GalleryImageVersionList struct {
- autorest.Response `json:"-"`
- // Value - A list of gallery image versions.
- Value *[]GalleryImageVersion `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of gallery image versions. Call ListNext() with this to fetch the next page of gallery image versions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// GalleryImageVersionListIterator provides access to a complete listing of GalleryImageVersion values.
-type GalleryImageVersionListIterator struct {
- i int
- page GalleryImageVersionListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *GalleryImageVersionListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *GalleryImageVersionListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter GalleryImageVersionListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter GalleryImageVersionListIterator) Response() GalleryImageVersionList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter GalleryImageVersionListIterator) Value() GalleryImageVersion {
- if !iter.page.NotDone() {
- return GalleryImageVersion{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the GalleryImageVersionListIterator type.
-func NewGalleryImageVersionListIterator(page GalleryImageVersionListPage) GalleryImageVersionListIterator {
- return GalleryImageVersionListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (givl GalleryImageVersionList) IsEmpty() bool {
- return givl.Value == nil || len(*givl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (givl GalleryImageVersionList) hasNextLink() bool {
- return givl.NextLink != nil && len(*givl.NextLink) != 0
-}
-
-// galleryImageVersionListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (givl GalleryImageVersionList) galleryImageVersionListPreparer(ctx context.Context) (*http.Request, error) {
- if !givl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(givl.NextLink)))
-}
-
-// GalleryImageVersionListPage contains a page of GalleryImageVersion values.
-type GalleryImageVersionListPage struct {
- fn func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error)
- givl GalleryImageVersionList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *GalleryImageVersionListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.givl)
- if err != nil {
- return err
- }
- page.givl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *GalleryImageVersionListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page GalleryImageVersionListPage) NotDone() bool {
- return !page.givl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page GalleryImageVersionListPage) Response() GalleryImageVersionList {
- return page.givl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page GalleryImageVersionListPage) Values() []GalleryImageVersion {
- if page.givl.IsEmpty() {
- return nil
- }
- return *page.givl.Value
-}
-
-// Creates a new instance of the GalleryImageVersionListPage type.
-func NewGalleryImageVersionListPage(cur GalleryImageVersionList, getNextPage func(context.Context, GalleryImageVersionList) (GalleryImageVersionList, error)) GalleryImageVersionListPage {
- return GalleryImageVersionListPage{
- fn: getNextPage,
- givl: cur,
- }
-}
-
-// GalleryImageVersionProperties describes the properties of a gallery image version.
-type GalleryImageVersionProperties struct {
- PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState3Creating', 'ProvisioningState3Updating', 'ProvisioningState3Failed', 'ProvisioningState3Succeeded', 'ProvisioningState3Deleting', 'ProvisioningState3Migrating'
- ProvisioningState ProvisioningState3 `json:"provisioningState,omitempty"`
- StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"`
- // ReplicationStatus - READ-ONLY
- ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageVersionProperties.
-func (givp GalleryImageVersionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if givp.PublishingProfile != nil {
- objectMap["publishingProfile"] = givp.PublishingProfile
- }
- if givp.StorageProfile != nil {
- objectMap["storageProfile"] = givp.StorageProfile
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryImageVersionPublishingProfile the publishing profile of a gallery image Version.
-type GalleryImageVersionPublishingProfile struct {
- // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
- TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
- // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
- ReplicaCount *int32 `json:"replicaCount,omitempty"`
- // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
- ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"`
- // PublishedDate - READ-ONLY; The timestamp for when the gallery image version is published.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
- StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. Possible values include: 'ReplicationModeFull', 'ReplicationModeShallow'
- ReplicationMode ReplicationMode `json:"replicationMode,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageVersionPublishingProfile.
-func (givpp GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if givpp.TargetRegions != nil {
- objectMap["targetRegions"] = givpp.TargetRegions
- }
- if givpp.ReplicaCount != nil {
- objectMap["replicaCount"] = givpp.ReplicaCount
- }
- if givpp.ExcludeFromLatest != nil {
- objectMap["excludeFromLatest"] = givpp.ExcludeFromLatest
- }
- if givpp.EndOfLifeDate != nil {
- objectMap["endOfLifeDate"] = givpp.EndOfLifeDate
- }
- if givpp.StorageAccountType != "" {
- objectMap["storageAccountType"] = givpp.StorageAccountType
- }
- if givpp.ReplicationMode != "" {
- objectMap["replicationMode"] = givpp.ReplicationMode
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryImageVersionsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImageVersionsClient) (GalleryImageVersion, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImageVersionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImageVersionsCreateOrUpdateFuture.Result.
-func (future *GalleryImageVersionsCreateOrUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- giv.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent {
- giv, err = client.CreateOrUpdateResponder(giv.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryImageVersionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImageVersionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImageVersionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImageVersionsDeleteFuture.Result.
-func (future *GalleryImageVersionsDeleteFuture) result(client GalleryImageVersionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// GalleryImageVersionStorageProfile this is the storage profile of a Gallery Image Version.
-type GalleryImageVersionStorageProfile struct {
- Source *GalleryArtifactVersionSource `json:"source,omitempty"`
- OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"`
- // DataDiskImages - A list of data disk images.
- DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"`
-}
-
-// GalleryImageVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GalleryImageVersionsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GalleryImageVersionsClient) (GalleryImageVersion, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GalleryImageVersionsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GalleryImageVersionsUpdateFuture.Result.
-func (future *GalleryImageVersionsUpdateFuture) result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- giv.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent {
- giv, err = client.UpdateResponder(giv.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryImageVersionUpdate specifies information about the gallery image version that you want to update.
-type GalleryImageVersionUpdate struct {
- *GalleryImageVersionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryImageVersionUpdate.
-func (givu GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if givu.GalleryImageVersionProperties != nil {
- objectMap["properties"] = givu.GalleryImageVersionProperties
- }
- if givu.Tags != nil {
- objectMap["tags"] = givu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryImageVersionUpdate struct.
-func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryImageVersionProperties GalleryImageVersionProperties
- err = json.Unmarshal(*v, &galleryImageVersionProperties)
- if err != nil {
- return err
- }
- givu.GalleryImageVersionProperties = &galleryImageVersionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- givu.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- givu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- givu.Type = &typeVar
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- givu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GalleryList the List Galleries operation response.
-type GalleryList struct {
- autorest.Response `json:"-"`
- // Value - A list of galleries.
- Value *[]Gallery `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// GalleryListIterator provides access to a complete listing of Gallery values.
-type GalleryListIterator struct {
- i int
- page GalleryListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *GalleryListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *GalleryListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter GalleryListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter GalleryListIterator) Response() GalleryList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter GalleryListIterator) Value() Gallery {
- if !iter.page.NotDone() {
- return Gallery{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the GalleryListIterator type.
-func NewGalleryListIterator(page GalleryListPage) GalleryListIterator {
- return GalleryListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (gl GalleryList) IsEmpty() bool {
- return gl.Value == nil || len(*gl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (gl GalleryList) hasNextLink() bool {
- return gl.NextLink != nil && len(*gl.NextLink) != 0
-}
-
-// galleryListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (gl GalleryList) galleryListPreparer(ctx context.Context) (*http.Request, error) {
- if !gl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(gl.NextLink)))
-}
-
-// GalleryListPage contains a page of Gallery values.
-type GalleryListPage struct {
- fn func(context.Context, GalleryList) (GalleryList, error)
- gl GalleryList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *GalleryListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/GalleryListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.gl)
- if err != nil {
- return err
- }
- page.gl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *GalleryListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page GalleryListPage) NotDone() bool {
- return !page.gl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page GalleryListPage) Response() GalleryList {
- return page.gl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page GalleryListPage) Values() []Gallery {
- if page.gl.IsEmpty() {
- return nil
- }
- return *page.gl.Value
-}
-
-// Creates a new instance of the GalleryListPage type.
-func NewGalleryListPage(cur GalleryList, getNextPage func(context.Context, GalleryList) (GalleryList, error)) GalleryListPage {
- return GalleryListPage{
- fn: getNextPage,
- gl: cur,
- }
-}
-
-// GalleryOSDiskImage this is the OS disk image.
-type GalleryOSDiskImage struct {
- // SizeInGB - READ-ONLY; This property indicates the size of the VHD to be created.
- SizeInGB *int32 `json:"sizeInGB,omitempty"`
- // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite'
- HostCaching HostCaching `json:"hostCaching,omitempty"`
- Source *GalleryArtifactVersionSource `json:"source,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryOSDiskImage.
-func (godi GalleryOSDiskImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if godi.HostCaching != "" {
- objectMap["hostCaching"] = godi.HostCaching
- }
- if godi.Source != nil {
- objectMap["source"] = godi.Source
- }
- return json.Marshal(objectMap)
-}
-
-// GalleryProperties describes the properties of a Shared Image Gallery.
-type GalleryProperties struct {
- // Description - The description of this Shared Image Gallery resource. This property is updatable.
- Description *string `json:"description,omitempty"`
- Identifier *GalleryIdentifier `json:"identifier,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateUpdating', 'ProvisioningStateFailed', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateMigrating'
- ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
- SharingProfile *SharingProfile `json:"sharingProfile,omitempty"`
- SoftDeletePolicy *SoftDeletePolicy `json:"softDeletePolicy,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryProperties.
-func (gp GalleryProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gp.Description != nil {
- objectMap["description"] = gp.Description
- }
- if gp.Identifier != nil {
- objectMap["identifier"] = gp.Identifier
- }
- if gp.SharingProfile != nil {
- objectMap["sharingProfile"] = gp.SharingProfile
- }
- if gp.SoftDeletePolicy != nil {
- objectMap["softDeletePolicy"] = gp.SoftDeletePolicy
- }
- return json.Marshal(objectMap)
-}
-
-// GallerySharingProfileUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type GallerySharingProfileUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(GallerySharingProfileClient) (SharingUpdate, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *GallerySharingProfileUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for GallerySharingProfileUpdateFuture.Result.
-func (future *GallerySharingProfileUpdateFuture) result(client GallerySharingProfileClient) (su SharingUpdate, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- su.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.GallerySharingProfileUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if su.Response.Response, err = future.GetResult(sender); err == nil && su.Response.Response.StatusCode != http.StatusNoContent {
- su, err = client.UpdateResponder(su.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GallerySharingProfileUpdateFuture", "Result", su.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// GalleryUpdate specifies information about the Shared Image Gallery that you want to update.
-type GalleryUpdate struct {
- *GalleryProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for GalleryUpdate.
-func (gu GalleryUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if gu.GalleryProperties != nil {
- objectMap["properties"] = gu.GalleryProperties
- }
- if gu.Tags != nil {
- objectMap["tags"] = gu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for GalleryUpdate struct.
-func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var galleryProperties GalleryProperties
- err = json.Unmarshal(*v, &galleryProperties)
- if err != nil {
- return err
- }
- gu.GalleryProperties = &galleryProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- gu.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- gu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- gu.Type = &typeVar
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- gu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// GrantAccessData data used for requesting a SAS.
-type GrantAccessData struct {
- // Access - Possible values include: 'AccessLevelNone', 'AccessLevelRead', 'AccessLevelWrite'
- Access AccessLevel `json:"access,omitempty"`
- // DurationInSeconds - Time duration in seconds until the SAS access expires.
- DurationInSeconds *int32 `json:"durationInSeconds,omitempty"`
-}
-
-// HardwareProfile specifies the hardware settings for the virtual machine.
-type HardwareProfile struct {
- // VMSize - Specifies the size of the virtual machine.
- // The enum data type is currently deprecated and will be removed by December 23rd 2023.
- // Recommended way to get the list of available sizes is using these APIs:
- // [List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
- // [List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/resourceskus/list)
- // [List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/sizes).
- // The available VM sizes depend on region and availability set. Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3',
- // 'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24'
- VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"`
- // VMSizeProperties - Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2021-07-01.
- // This feature is still in preview mode and is not supported for VirtualMachineScaleSet.
- // Please follow the instructions in [VM Customization](https://aka.ms/vmcustomization) for more details.
- VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"`
-}
-
-// Image the source user image virtual hard disk. The virtual hard disk will be copied before being
-// attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not
-// exist.
-type Image struct {
- autorest.Response `json:"-"`
- *ImageProperties `json:"properties,omitempty"`
- // ExtendedLocation - The extended location of the Image.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for Image.
-func (i Image) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if i.ImageProperties != nil {
- objectMap["properties"] = i.ImageProperties
- }
- if i.ExtendedLocation != nil {
- objectMap["extendedLocation"] = i.ExtendedLocation
- }
- if i.Location != nil {
- objectMap["location"] = i.Location
- }
- if i.Tags != nil {
- objectMap["tags"] = i.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for Image struct.
-func (i *Image) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var imageProperties ImageProperties
- err = json.Unmarshal(*v, &imageProperties)
- if err != nil {
- return err
- }
- i.ImageProperties = &imageProperties
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- i.ExtendedLocation = &extendedLocation
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- i.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- i.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- i.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- i.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- i.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// ImageDataDisk describes a data disk.
-type ImageDataDisk struct {
- // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- Lun *int32 `json:"lun,omitempty"`
- // Snapshot - The snapshot.
- Snapshot *SubResource `json:"snapshot,omitempty"`
- // ManagedDisk - The managedDisk.
- ManagedDisk *SubResource `json:"managedDisk,omitempty"`
- // BlobURI - The Virtual Hard Disk.
- BlobURI *string `json:"blobUri,omitempty"`
- // Caching - Specifies the caching requirements.
- // Possible values are:
- // **None**
- // **ReadOnly**
- // **ReadWrite**
- // Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
- // This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
- StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
- // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
- DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
-}
-
-// ImageDisk describes a image disk.
-type ImageDisk struct {
- // Snapshot - The snapshot.
- Snapshot *SubResource `json:"snapshot,omitempty"`
- // ManagedDisk - The managedDisk.
- ManagedDisk *SubResource `json:"managedDisk,omitempty"`
- // BlobURI - The Virtual Hard Disk.
- BlobURI *string `json:"blobUri,omitempty"`
- // Caching - Specifies the caching requirements.
- // Possible values are:
- // **None**
- // **ReadOnly**
- // **ReadWrite**
- // Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
- // This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
- StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
- // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
- DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
-}
-
-// ImageDiskReference the source image used for creating the disk.
-type ImageDiskReference struct {
- // ID - A relative uri containing either a Platform Image Repository or user image reference.
- ID *string `json:"id,omitempty"`
- // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
- Lun *int32 `json:"lun,omitempty"`
-}
-
-// ImageListResult the List Image operation response.
-type ImageListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of Images.
- Value *[]Image `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// ImageListResultIterator provides access to a complete listing of Image values.
-type ImageListResultIterator struct {
- i int
- page ImageListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *ImageListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *ImageListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ImageListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter ImageListResultIterator) Response() ImageListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter ImageListResultIterator) Value() Image {
- if !iter.page.NotDone() {
- return Image{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the ImageListResultIterator type.
-func NewImageListResultIterator(page ImageListResultPage) ImageListResultIterator {
- return ImageListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (ilr ImageListResult) IsEmpty() bool {
- return ilr.Value == nil || len(*ilr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (ilr ImageListResult) hasNextLink() bool {
- return ilr.NextLink != nil && len(*ilr.NextLink) != 0
-}
-
-// imageListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (ilr ImageListResult) imageListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !ilr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(ilr.NextLink)))
-}
-
-// ImageListResultPage contains a page of Image values.
-type ImageListResultPage struct {
- fn func(context.Context, ImageListResult) (ImageListResult, error)
- ilr ImageListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *ImageListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ImageListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.ilr)
- if err != nil {
- return err
- }
- page.ilr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *ImageListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ImageListResultPage) NotDone() bool {
- return !page.ilr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page ImageListResultPage) Response() ImageListResult {
- return page.ilr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page ImageListResultPage) Values() []Image {
- if page.ilr.IsEmpty() {
- return nil
- }
- return *page.ilr.Value
-}
-
-// Creates a new instance of the ImageListResultPage type.
-func NewImageListResultPage(cur ImageListResult, getNextPage func(context.Context, ImageListResult) (ImageListResult, error)) ImageListResultPage {
- return ImageListResultPage{
- fn: getNextPage,
- ilr: cur,
- }
-}
-
-// ImageOSDisk describes an Operating System disk.
-type ImageOSDisk struct {
- // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.
- // Possible values are:
- // **Windows**
- // **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - The OS State. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
- OsState OperatingSystemStateTypes `json:"osState,omitempty"`
- // Snapshot - The snapshot.
- Snapshot *SubResource `json:"snapshot,omitempty"`
- // ManagedDisk - The managedDisk.
- ManagedDisk *SubResource `json:"managedDisk,omitempty"`
- // BlobURI - The Virtual Hard Disk.
- BlobURI *string `json:"blobUri,omitempty"`
- // Caching - Specifies the caching requirements.
- // Possible values are:
- // **None**
- // **ReadOnly**
- // **ReadWrite**
- // Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
- // This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
- StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
- // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed image disk.
- DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
-}
-
-// ImageProperties describes the properties of an Image.
-type ImageProperties struct {
- // SourceVirtualMachine - The source virtual machine from which Image is created.
- SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"`
- // StorageProfile - Specifies the storage settings for the virtual machine disks.
- StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // HyperVGeneration - Specifies the HyperVGenerationType of the VirtualMachine created from the image. From API Version 2019-03-01 if the image source is a blob, then we need the user to specify the value, if the source is managed resource like disk or snapshot, we may require the user to specify the property if we cannot deduce it from the source managed resource. Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2'
- HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ImageProperties.
-func (IP ImageProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if IP.SourceVirtualMachine != nil {
- objectMap["sourceVirtualMachine"] = IP.SourceVirtualMachine
- }
- if IP.StorageProfile != nil {
- objectMap["storageProfile"] = IP.StorageProfile
- }
- if IP.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = IP.HyperVGeneration
- }
- return json.Marshal(objectMap)
-}
-
-// ImagePurchasePlan describes the gallery image definition purchase plan. This is used by marketplace
-// images.
-type ImagePurchasePlan struct {
- // Name - The plan ID.
- Name *string `json:"name,omitempty"`
- // Publisher - The publisher ID.
- Publisher *string `json:"publisher,omitempty"`
- // Product - The product ID.
- Product *string `json:"product,omitempty"`
-}
-
-// ImageReference specifies information about the image to use. You can specify information about platform
-// images, marketplace images, or virtual machine images. This element is required when you want to use a
-// platform image, marketplace image, or virtual machine image, but is not used in other creation
-// operations. NOTE: Image reference publisher and offer can only be set when you create the scale set.
-type ImageReference struct {
- // Publisher - The image publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine.
- Offer *string `json:"offer,omitempty"`
- // Sku - The image SKU.
- Sku *string `json:"sku,omitempty"`
- // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.
- Version *string `json:"version,omitempty"`
- // ExactVersion - READ-ONLY; Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from 'version', only if the value specified in 'version' field is 'latest'.
- ExactVersion *string `json:"exactVersion,omitempty"`
- // SharedGalleryImageID - Specified the shared gallery image unique id for vm deployment. This can be fetched from shared gallery image GET call.
- SharedGalleryImageID *string `json:"sharedGalleryImageId,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ImageReference.
-func (ir ImageReference) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ir.Publisher != nil {
- objectMap["publisher"] = ir.Publisher
- }
- if ir.Offer != nil {
- objectMap["offer"] = ir.Offer
- }
- if ir.Sku != nil {
- objectMap["sku"] = ir.Sku
- }
- if ir.Version != nil {
- objectMap["version"] = ir.Version
- }
- if ir.SharedGalleryImageID != nil {
- objectMap["sharedGalleryImageId"] = ir.SharedGalleryImageID
- }
- if ir.ID != nil {
- objectMap["id"] = ir.ID
- }
- return json.Marshal(objectMap)
-}
-
-// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type ImagesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(ImagesClient) (Image, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *ImagesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for ImagesCreateOrUpdateFuture.Result.
-func (future *ImagesCreateOrUpdateFuture) result(client ImagesClient) (i Image, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- i.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent {
- i, err = client.CreateOrUpdateResponder(i.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
-type ImagesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(ImagesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *ImagesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for ImagesDeleteFuture.Result.
-func (future *ImagesDeleteFuture) result(client ImagesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// ImageStorageProfile describes a storage profile.
-type ImageStorageProfile struct {
- // OsDisk - Specifies information about the operating system disk used by the virtual machine. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- OsDisk *ImageOSDisk `json:"osDisk,omitempty"`
- // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"`
- // ZoneResilient - Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage (ZRS).
- ZoneResilient *bool `json:"zoneResilient,omitempty"`
-}
-
-// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
-type ImagesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(ImagesClient) (Image, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *ImagesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for ImagesUpdateFuture.Result.
-func (future *ImagesUpdateFuture) result(client ImagesClient) (i Image, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- i.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent {
- i, err = client.UpdateResponder(i.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// ImageUpdate the source user image virtual hard disk. Only tags may be updated.
-type ImageUpdate struct {
- *ImageProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for ImageUpdate.
-func (iu ImageUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if iu.ImageProperties != nil {
- objectMap["properties"] = iu.ImageProperties
- }
- if iu.Tags != nil {
- objectMap["tags"] = iu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for ImageUpdate struct.
-func (iu *ImageUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var imageProperties ImageProperties
- err = json.Unmarshal(*v, &imageProperties)
- if err != nil {
- return err
- }
- iu.ImageProperties = &imageProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- iu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// InnerError inner error details.
-type InnerError struct {
- // Exceptiontype - The exception type.
- Exceptiontype *string `json:"exceptiontype,omitempty"`
- // Errordetail - The internal error message or exception dump.
- Errordetail *string `json:"errordetail,omitempty"`
-}
-
-// InstanceSku ...
-type InstanceSku struct {
- // Name - READ-ONLY; The sku name.
- Name *string `json:"name,omitempty"`
- // Tier - READ-ONLY; The tier of the cloud service role instance.
- Tier *string `json:"tier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for InstanceSku.
-func (is InstanceSku) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// InstanceViewStatus instance view status.
-type InstanceViewStatus struct {
- // Code - The status code.
- Code *string `json:"code,omitempty"`
- // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError'
- Level StatusLevelTypes `json:"level,omitempty"`
- // DisplayStatus - The short localizable label for the status.
- DisplayStatus *string `json:"displayStatus,omitempty"`
- // Message - The detailed status message, including for alerts and error messages.
- Message *string `json:"message,omitempty"`
- // Time - The time of the status.
- Time *date.Time `json:"time,omitempty"`
-}
-
-// InstanceViewStatusesSummary instance view statuses.
-type InstanceViewStatusesSummary struct {
- // StatusesSummary - READ-ONLY
- StatusesSummary *[]StatusCodeCount `json:"statusesSummary,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for InstanceViewStatusesSummary.
-func (ivss InstanceViewStatusesSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// KeyForDiskEncryptionSet key Vault Key Url to be used for server side encryption of Managed Disks and
-// Snapshots
-type KeyForDiskEncryptionSet struct {
- // SourceVault - Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription.
- SourceVault *SourceVault `json:"sourceVault,omitempty"`
- // KeyURL - Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value.
- KeyURL *string `json:"keyUrl,omitempty"`
-}
-
-// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used
-// to unwrap the encryptionKey
-type KeyVaultAndKeyReference struct {
- // SourceVault - Resource id of the KeyVault containing the key or secret
- SourceVault *SourceVault `json:"sourceVault,omitempty"`
- // KeyURL - Url pointing to a key or secret in KeyVault
- KeyURL *string `json:"keyUrl,omitempty"`
-}
-
-// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key
-type KeyVaultAndSecretReference struct {
- // SourceVault - Resource id of the KeyVault containing the key or secret
- SourceVault *SourceVault `json:"sourceVault,omitempty"`
- // SecretURL - Url pointing to a key or secret in KeyVault
- SecretURL *string `json:"secretUrl,omitempty"`
-}
-
-// KeyVaultKeyReference describes a reference to Key Vault Key
-type KeyVaultKeyReference struct {
- // KeyURL - The URL referencing a key encryption key in Key Vault.
- KeyURL *string `json:"keyUrl,omitempty"`
- // SourceVault - The relative URL of the Key Vault containing the key.
- SourceVault *SubResource `json:"sourceVault,omitempty"`
-}
-
-// KeyVaultSecretReference describes a reference to Key Vault Secret
-type KeyVaultSecretReference struct {
- // SecretURL - The URL referencing a secret in a Key Vault.
- SecretURL *string `json:"secretUrl,omitempty"`
- // SourceVault - The relative URL of the Key Vault containing the secret.
- SourceVault *SubResource `json:"sourceVault,omitempty"`
-}
-
-// LastPatchInstallationSummary describes the properties of the last installed patch summary.
-type LastPatchInstallationSummary struct {
- // Status - READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings'
- Status PatchOperationStatus `json:"status,omitempty"`
- // InstallationActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs.
- InstallationActivityID *string `json:"installationActivityId,omitempty"`
- // MaintenanceWindowExceeded - READ-ONLY; Describes whether the operation ran out of time before it completed all its intended actions
- MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty"`
- // NotSelectedPatchCount - READ-ONLY; The number of all available patches but not going to be installed because it didn't match a classification or inclusion list entry.
- NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty"`
- // ExcludedPatchCount - READ-ONLY; The number of all available patches but excluded explicitly by a customer-specified exclusion list match.
- ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty"`
- // PendingPatchCount - READ-ONLY; The number of all available patches expected to be installed over the course of the patch installation operation.
- PendingPatchCount *int32 `json:"pendingPatchCount,omitempty"`
- // InstalledPatchCount - READ-ONLY; The count of patches that successfully installed.
- InstalledPatchCount *int32 `json:"installedPatchCount,omitempty"`
- // FailedPatchCount - READ-ONLY; The count of patches that failed installation.
- FailedPatchCount *int32 `json:"failedPatchCount,omitempty"`
- // StartTime - READ-ONLY; The UTC timestamp when the operation began.
- StartTime *date.Time `json:"startTime,omitempty"`
- // LastModifiedTime - READ-ONLY; The UTC timestamp when the operation began.
- LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
- // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them.
- Error *APIError `json:"error,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for LastPatchInstallationSummary.
-func (lpis LastPatchInstallationSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// LinuxConfiguration specifies the Linux operating system settings on the virtual machine. For a
-// list of supported Linux distributions, see [Linux on Azure-Endorsed
-// Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros).
-type LinuxConfiguration struct {
- // DisablePasswordAuthentication - Specifies whether password authentication should be disabled.
- DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"`
- // SSH - Specifies the ssh key configuration for a Linux OS.
- SSH *SSHConfiguration `json:"ssh,omitempty"`
- // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later.
- ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"`
- // PatchSettings - [Preview Feature] Specifies settings related to VM Guest Patching on Linux.
- PatchSettings *LinuxPatchSettings `json:"patchSettings,omitempty"`
-}
-
-// LinuxParameters input for InstallPatches on a Linux VM, as directly received by the API
-type LinuxParameters struct {
- // ClassificationsToInclude - The update classifications to select when installing patches for Linux.
- ClassificationsToInclude *[]VMGuestPatchClassificationLinux `json:"classificationsToInclude,omitempty"`
- // PackageNameMasksToInclude - packages to include in the patch operation. Format: packageName_packageVersion
- PackageNameMasksToInclude *[]string `json:"packageNameMasksToInclude,omitempty"`
- // PackageNameMasksToExclude - packages to exclude in the patch operation. Format: packageName_packageVersion
- PackageNameMasksToExclude *[]string `json:"packageNameMasksToExclude,omitempty"`
- // MaintenanceRunID - This is used as a maintenance run identifier for Auto VM Guest Patching in Linux.
- MaintenanceRunID *string `json:"maintenanceRunId,omitempty"`
-}
-
-// LinuxPatchSettings specifies settings related to VM Guest Patching on Linux.
-type LinuxPatchSettings struct {
- // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale set with OrchestrationMode as Flexible. Possible values are: **ImageDefault** - The virtual machine's default patching configuration is used. **AutomaticByPlatform** - The virtual machine will be automatically updated by the platform. The property provisionVMAgent must be true. Possible values include: 'LinuxVMGuestPatchModeImageDefault', 'LinuxVMGuestPatchModeAutomaticByPlatform'
- PatchMode LinuxVMGuestPatchMode `json:"patchMode,omitempty"`
- // AssessmentMode - Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine. Possible values are: **ImageDefault** - You control the timing of patch assessments on a virtual machine. **AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'LinuxPatchAssessmentModeImageDefault', 'LinuxPatchAssessmentModeAutomaticByPlatform'
- AssessmentMode LinuxPatchAssessmentMode `json:"assessmentMode,omitempty"`
-}
-
-// ListUsagesResult the List Usages operation response.
-type ListUsagesResult struct {
- autorest.Response `json:"-"`
- // Value - The list of compute resource usages.
- Value *[]Usage `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// ListUsagesResultIterator provides access to a complete listing of Usage values.
-type ListUsagesResultIterator struct {
- i int
- page ListUsagesResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *ListUsagesResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *ListUsagesResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ListUsagesResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter ListUsagesResultIterator) Response() ListUsagesResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter ListUsagesResultIterator) Value() Usage {
- if !iter.page.NotDone() {
- return Usage{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the ListUsagesResultIterator type.
-func NewListUsagesResultIterator(page ListUsagesResultPage) ListUsagesResultIterator {
- return ListUsagesResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (lur ListUsagesResult) IsEmpty() bool {
- return lur.Value == nil || len(*lur.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (lur ListUsagesResult) hasNextLink() bool {
- return lur.NextLink != nil && len(*lur.NextLink) != 0
-}
-
-// listUsagesResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (lur ListUsagesResult) listUsagesResultPreparer(ctx context.Context) (*http.Request, error) {
- if !lur.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(lur.NextLink)))
-}
-
-// ListUsagesResultPage contains a page of Usage values.
-type ListUsagesResultPage struct {
- fn func(context.Context, ListUsagesResult) (ListUsagesResult, error)
- lur ListUsagesResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *ListUsagesResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.lur)
- if err != nil {
- return err
- }
- page.lur = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *ListUsagesResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ListUsagesResultPage) NotDone() bool {
- return !page.lur.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page ListUsagesResultPage) Response() ListUsagesResult {
- return page.lur
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page ListUsagesResultPage) Values() []Usage {
- if page.lur.IsEmpty() {
- return nil
- }
- return *page.lur.Value
-}
-
-// Creates a new instance of the ListUsagesResultPage type.
-func NewListUsagesResultPage(cur ListUsagesResult, getNextPage func(context.Context, ListUsagesResult) (ListUsagesResult, error)) ListUsagesResultPage {
- return ListUsagesResultPage{
- fn: getNextPage,
- lur: cur,
- }
-}
-
-// ListVirtualMachineExtensionImage ...
-type ListVirtualMachineExtensionImage struct {
- autorest.Response `json:"-"`
- Value *[]VirtualMachineExtensionImage `json:"value,omitempty"`
-}
-
-// ListVirtualMachineImageResource ...
-type ListVirtualMachineImageResource struct {
- autorest.Response `json:"-"`
- Value *[]VirtualMachineImageResource `json:"value,omitempty"`
-}
-
-// LoadBalancerConfiguration describes the load balancer configuration.
-type LoadBalancerConfiguration struct {
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
- // Name - The name of the Load balancer
- Name *string `json:"name,omitempty"`
- // Properties - Properties of the load balancer configuration.
- Properties *LoadBalancerConfigurationProperties `json:"properties,omitempty"`
-}
-
-// LoadBalancerConfigurationProperties ...
-type LoadBalancerConfigurationProperties struct {
- // FrontendIPConfigurations - Specifies the frontend IP to be used for the load balancer. Only IPv4 frontend IP address is supported. Each load balancer configuration must have exactly one frontend IP configuration.
- FrontendIPConfigurations *[]LoadBalancerFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"`
-}
-
-// LoadBalancerFrontendIPConfiguration ...
-type LoadBalancerFrontendIPConfiguration struct {
- // Name - The name of the resource that is unique within the set of frontend IP configurations used by the load balancer. This name can be used to access the resource.
- Name *string `json:"name,omitempty"`
- // Properties - Properties of load balancer frontend ip configuration.
- Properties *LoadBalancerFrontendIPConfigurationProperties `json:"properties,omitempty"`
-}
-
-// LoadBalancerFrontendIPConfigurationProperties describes a cloud service IP Configuration
-type LoadBalancerFrontendIPConfigurationProperties struct {
- // PublicIPAddress - The reference to the public ip address resource.
- PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"`
- // Subnet - The reference to the virtual network subnet resource.
- Subnet *SubResource `json:"subnet,omitempty"`
- // PrivateIPAddress - The virtual network private IP address of the IP configuration.
- PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
-}
-
-// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type LogAnalyticsExportRequestRateByIntervalFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *LogAnalyticsExportRequestRateByIntervalFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for LogAnalyticsExportRequestRateByIntervalFuture.Result.
-func (future *LogAnalyticsExportRequestRateByIntervalFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- laor.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent {
- laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type LogAnalyticsExportThrottledRequestsFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(LogAnalyticsClient) (LogAnalyticsOperationResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *LogAnalyticsExportThrottledRequestsFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for LogAnalyticsExportThrottledRequestsFuture.Result.
-func (future *LogAnalyticsExportThrottledRequestsFuture) result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- laor.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent {
- laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// LogAnalyticsInputBase api input base class for LogAnalytics Api.
-type LogAnalyticsInputBase struct {
- // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
- BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
- // FromTime - From time of the query
- FromTime *date.Time `json:"fromTime,omitempty"`
- // ToTime - To time of the query
- ToTime *date.Time `json:"toTime,omitempty"`
- // GroupByThrottlePolicy - Group query result by Throttle Policy applied.
- GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
- // GroupByOperationName - Group query result by Operation Name.
- GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
- // GroupByResourceName - Group query result by Resource Name.
- GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
- // GroupByClientApplicationID - Group query result by Client Application ID.
- GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"`
- // GroupByUserAgent - Group query result by User Agent.
- GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"`
-}
-
-// LogAnalyticsOperationResult logAnalytics operation status response
-type LogAnalyticsOperationResult struct {
- autorest.Response `json:"-"`
- // Properties - READ-ONLY; LogAnalyticsOutput
- Properties *LogAnalyticsOutput `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for LogAnalyticsOperationResult.
-func (laor LogAnalyticsOperationResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// LogAnalyticsOutput logAnalytics output properties
-type LogAnalyticsOutput struct {
- // Output - READ-ONLY; Output file Uri path to blob container.
- Output *string `json:"output,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for LogAnalyticsOutput.
-func (lao LogAnalyticsOutput) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// MaintenanceRedeployStatus maintenance Operation Status.
-type MaintenanceRedeployStatus struct {
- // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance.
- IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"`
- // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window.
- PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"`
- // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window.
- PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"`
- // MaintenanceWindowStartTime - Start Time for the Maintenance Window.
- MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"`
- // MaintenanceWindowEndTime - End Time for the Maintenance Window.
- MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"`
- // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted'
- LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"`
- // LastOperationMessage - Message returned for the last Maintenance Operation.
- LastOperationMessage *string `json:"lastOperationMessage,omitempty"`
-}
-
-// ManagedArtifact the managed artifact.
-type ManagedArtifact struct {
- // ID - The managed artifact id.
- ID *string `json:"id,omitempty"`
-}
-
-// ManagedDiskParameters the parameters of a managed disk.
-type ManagedDiskParameters struct {
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
- StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
- // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk.
- DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// NetworkInterfaceReference describes a network interface reference.
-type NetworkInterfaceReference struct {
- *NetworkInterfaceReferenceProperties `json:"properties,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for NetworkInterfaceReference.
-func (nir NetworkInterfaceReference) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if nir.NetworkInterfaceReferenceProperties != nil {
- objectMap["properties"] = nir.NetworkInterfaceReferenceProperties
- }
- if nir.ID != nil {
- objectMap["id"] = nir.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for NetworkInterfaceReference struct.
-func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var networkInterfaceReferenceProperties NetworkInterfaceReferenceProperties
- err = json.Unmarshal(*v, &networkInterfaceReferenceProperties)
- if err != nil {
- return err
- }
- nir.NetworkInterfaceReferenceProperties = &networkInterfaceReferenceProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- nir.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// NetworkInterfaceReferenceProperties describes a network interface reference properties.
-type NetworkInterfaceReferenceProperties struct {
- // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
- Primary *bool `json:"primary,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
-}
-
-// NetworkProfile specifies the network interfaces or the networking configuration of the virtual machine.
-type NetworkProfile struct {
- // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine.
- NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
- NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
- // NetworkInterfaceConfigurations - Specifies the networking configurations that will be used to create the virtual machine networking resources.
- NetworkInterfaceConfigurations *[]VirtualMachineNetworkInterfaceConfiguration `json:"networkInterfaceConfigurations,omitempty"`
-}
-
-// OperationListResult the List Compute Operation operation response.
-type OperationListResult struct {
- autorest.Response `json:"-"`
- // Value - READ-ONLY; The list of compute operations
- Value *[]OperationValue `json:"value,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OperationListResult.
-func (olr OperationListResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OperationValue describes the properties of a Compute Operation value.
-type OperationValue struct {
- // Origin - READ-ONLY; The origin of the compute operation.
- Origin *string `json:"origin,omitempty"`
- // Name - READ-ONLY; The name of the compute operation.
- Name *string `json:"name,omitempty"`
- *OperationValueDisplay `json:"display,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OperationValue.
-func (ov OperationValue) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ov.OperationValueDisplay != nil {
- objectMap["display"] = ov.OperationValueDisplay
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for OperationValue struct.
-func (ov *OperationValue) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "origin":
- if v != nil {
- var origin string
- err = json.Unmarshal(*v, &origin)
- if err != nil {
- return err
- }
- ov.Origin = &origin
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- ov.Name = &name
- }
- case "display":
- if v != nil {
- var operationValueDisplay OperationValueDisplay
- err = json.Unmarshal(*v, &operationValueDisplay)
- if err != nil {
- return err
- }
- ov.OperationValueDisplay = &operationValueDisplay
- }
- }
- }
-
- return nil
-}
-
-// OperationValueDisplay describes the properties of a Compute Operation Value Display.
-type OperationValueDisplay struct {
- // Operation - READ-ONLY; The display name of the compute operation.
- Operation *string `json:"operation,omitempty"`
- // Resource - READ-ONLY; The display name of the resource the operation applies to.
- Resource *string `json:"resource,omitempty"`
- // Description - READ-ONLY; The description of the operation.
- Description *string `json:"description,omitempty"`
- // Provider - READ-ONLY; The resource provider for the operation.
- Provider *string `json:"provider,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OperationValueDisplay.
-func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OrchestrationServiceStateInput the input for OrchestrationServiceState
-type OrchestrationServiceStateInput struct {
- // ServiceName - The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs'
- ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"`
- // Action - The action to be performed. Possible values include: 'OrchestrationServiceStateActionResume', 'OrchestrationServiceStateActionSuspend'
- Action OrchestrationServiceStateAction `json:"action,omitempty"`
-}
-
-// OrchestrationServiceSummary summary for an orchestration service of a virtual machine scale set.
-type OrchestrationServiceSummary struct {
- // ServiceName - READ-ONLY; The name of the service. Possible values include: 'OrchestrationServiceNamesAutomaticRepairs', 'OrchestrationServiceNamesDummyOrchestrationServiceName'
- ServiceName OrchestrationServiceNames `json:"serviceName,omitempty"`
- // ServiceState - READ-ONLY; The current state of the service. Possible values include: 'OrchestrationServiceStateNotRunning', 'OrchestrationServiceStateRunning', 'OrchestrationServiceStateSuspended'
- ServiceState OrchestrationServiceState `json:"serviceState,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OrchestrationServiceSummary.
-func (oss OrchestrationServiceSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OSDisk specifies information about the operating system disk used by the virtual machine. For
-// more information about disks, see [About disks and VHDs for Azure virtual
-// machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
-type OSDisk struct {
- // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD. Possible values are: **Windows** **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // EncryptionSettings - Specifies the encryption settings for the OS Disk. Minimum api-version: 2015-06-15
- EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
- // Name - The disk name.
- Name *string `json:"name,omitempty"`
- // Vhd - The virtual hard disk.
- Vhd *VirtualHardDisk `json:"vhd,omitempty"`
- // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist.
- Image *VirtualHardDisk `json:"image,omitempty"`
- // Caching - Specifies the caching requirements. Possible values are: **None** **ReadOnly** **ReadWrite** Default: **None** for Standard storage. **ReadOnly** for Premium storage. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // DiffDiskSettings - Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine.
- DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"`
- // CreateOption - Specifies how the virtual machine should be created. Possible values are: **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine. **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
- CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
- // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // ManagedDisk - The managed disk parameters.
- ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
- // DeleteOption - Specifies whether OS Disk should be deleted or detached upon VM deletion. Possible values: **Delete** If this value is used, the OS disk is deleted when VM is deleted. **Detach** If this value is used, the os disk is retained after VM is deleted. The default value is set to **detach**. For an ephemeral OS Disk, the default value is set to **Delete**. User cannot change the delete option for ephemeral OS Disk. Possible values include: 'DiskDeleteOptionTypesDelete', 'DiskDeleteOptionTypesDetach'
- DeleteOption DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
-}
-
-// OSDiskImage contains the os disk image information.
-type OSDiskImage struct {
- // OperatingSystem - The operating system of the osDiskImage. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"`
-}
-
-// OSDiskImageEncryption contains encryption settings for an OS disk image.
-type OSDiskImageEncryption struct {
- // DiskEncryptionSetID - A relative URI containing the resource ID of the disk encryption set.
- DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"`
-}
-
-// OSFamily describes a cloud service OS family.
-type OSFamily struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource Id.
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type.
- Type *string `json:"type,omitempty"`
- // Location - READ-ONLY; Resource location.
- Location *string `json:"location,omitempty"`
- Properties *OSFamilyProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OSFamily.
-func (of OSFamily) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if of.Properties != nil {
- objectMap["properties"] = of.Properties
- }
- return json.Marshal(objectMap)
-}
-
-// OSFamilyListResult ...
-type OSFamilyListResult struct {
- autorest.Response `json:"-"`
- Value *[]OSFamily `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// OSFamilyListResultIterator provides access to a complete listing of OSFamily values.
-type OSFamilyListResultIterator struct {
- i int
- page OSFamilyListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *OSFamilyListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OSFamilyListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *OSFamilyListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter OSFamilyListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter OSFamilyListResultIterator) Response() OSFamilyListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter OSFamilyListResultIterator) Value() OSFamily {
- if !iter.page.NotDone() {
- return OSFamily{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the OSFamilyListResultIterator type.
-func NewOSFamilyListResultIterator(page OSFamilyListResultPage) OSFamilyListResultIterator {
- return OSFamilyListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (oflr OSFamilyListResult) IsEmpty() bool {
- return oflr.Value == nil || len(*oflr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (oflr OSFamilyListResult) hasNextLink() bool {
- return oflr.NextLink != nil && len(*oflr.NextLink) != 0
-}
-
-// oSFamilyListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (oflr OSFamilyListResult) oSFamilyListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !oflr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(oflr.NextLink)))
-}
-
-// OSFamilyListResultPage contains a page of OSFamily values.
-type OSFamilyListResultPage struct {
- fn func(context.Context, OSFamilyListResult) (OSFamilyListResult, error)
- oflr OSFamilyListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *OSFamilyListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OSFamilyListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.oflr)
- if err != nil {
- return err
- }
- page.oflr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *OSFamilyListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page OSFamilyListResultPage) NotDone() bool {
- return !page.oflr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page OSFamilyListResultPage) Response() OSFamilyListResult {
- return page.oflr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page OSFamilyListResultPage) Values() []OSFamily {
- if page.oflr.IsEmpty() {
- return nil
- }
- return *page.oflr.Value
-}
-
-// Creates a new instance of the OSFamilyListResultPage type.
-func NewOSFamilyListResultPage(cur OSFamilyListResult, getNextPage func(context.Context, OSFamilyListResult) (OSFamilyListResult, error)) OSFamilyListResultPage {
- return OSFamilyListResultPage{
- fn: getNextPage,
- oflr: cur,
- }
-}
-
-// OSFamilyProperties OS family properties.
-type OSFamilyProperties struct {
- // Name - READ-ONLY; The OS family name.
- Name *string `json:"name,omitempty"`
- // Label - READ-ONLY; The OS family label.
- Label *string `json:"label,omitempty"`
- // Versions - READ-ONLY; List of OS versions belonging to this family.
- Versions *[]OSVersionPropertiesBase `json:"versions,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OSFamilyProperties.
-func (ofp OSFamilyProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OSProfile specifies the operating system settings for the virtual machine. Some of the settings cannot
-// be changed once VM is provisioned.
-type OSProfile struct {
- // ComputerName - Specifies the host OS name of the virtual machine. This name cannot be updated after the VM is created. **Max-length (Windows):** 15 characters **Max-length (Linux):** 64 characters. For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/azure-resource-manager/management/resource-name-rules).
- ComputerName *string `json:"computerName,omitempty"`
- // AdminUsername - Specifies the name of the administrator account. This property cannot be updated after the VM is created. **Windows-only restriction:** Cannot end in "." **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". **Minimum-length (Linux):** 1 character **Max-length (Linux):** 64 characters **Max-length (Windows):** 20 characters.
- AdminUsername *string `json:"adminUsername,omitempty"`
- // AdminPassword - Specifies the password of the administrator account. **Minimum-length (Windows):** 8 characters **Minimum-length (Linux):** 6 characters **Max-length (Windows):** 123 characters **Max-length (Linux):** 72 characters **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled Has lower characters Has upper characters Has a digit Has a special character (Regex match [\W_]) **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!" For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp) For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection)
- AdminPassword *string `json:"adminPassword,omitempty"`
- // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. **Note: Do not pass any secrets or passwords in customData property** This property cannot be updated after the VM is created. customData is passed to the VM to be saved as a file, for more information see [Custom Data on Azure VMs](https://azure.microsoft.com/blog/custom-data-and-cloud-init-on-windows-azure/) For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init)
- CustomData *string `json:"customData,omitempty"`
- // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine.
- WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
- // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine. For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros).
- LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
- // Secrets - Specifies set of certificates that should be installed onto the virtual machine. To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
- Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
- // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine. This may only be set to False when no extensions are present on the virtual machine.
- AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"`
- // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required to infer provision success of the virtual machine. **Note: This property is for private testing only, and all customers must not set the property to false.**
- RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"`
-}
-
-// OSVersion describes a cloud service OS version.
-type OSVersion struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource Id.
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type.
- Type *string `json:"type,omitempty"`
- // Location - READ-ONLY; Resource location.
- Location *string `json:"location,omitempty"`
- Properties *OSVersionProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OSVersion.
-func (ov OSVersion) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ov.Properties != nil {
- objectMap["properties"] = ov.Properties
- }
- return json.Marshal(objectMap)
-}
-
-// OSVersionListResult ...
-type OSVersionListResult struct {
- autorest.Response `json:"-"`
- Value *[]OSVersion `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// OSVersionListResultIterator provides access to a complete listing of OSVersion values.
-type OSVersionListResultIterator struct {
- i int
- page OSVersionListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *OSVersionListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OSVersionListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *OSVersionListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter OSVersionListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter OSVersionListResultIterator) Response() OSVersionListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter OSVersionListResultIterator) Value() OSVersion {
- if !iter.page.NotDone() {
- return OSVersion{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the OSVersionListResultIterator type.
-func NewOSVersionListResultIterator(page OSVersionListResultPage) OSVersionListResultIterator {
- return OSVersionListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (ovlr OSVersionListResult) IsEmpty() bool {
- return ovlr.Value == nil || len(*ovlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (ovlr OSVersionListResult) hasNextLink() bool {
- return ovlr.NextLink != nil && len(*ovlr.NextLink) != 0
-}
-
-// oSVersionListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (ovlr OSVersionListResult) oSVersionListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !ovlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(ovlr.NextLink)))
-}
-
-// OSVersionListResultPage contains a page of OSVersion values.
-type OSVersionListResultPage struct {
- fn func(context.Context, OSVersionListResult) (OSVersionListResult, error)
- ovlr OSVersionListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *OSVersionListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OSVersionListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.ovlr)
- if err != nil {
- return err
- }
- page.ovlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *OSVersionListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page OSVersionListResultPage) NotDone() bool {
- return !page.ovlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page OSVersionListResultPage) Response() OSVersionListResult {
- return page.ovlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page OSVersionListResultPage) Values() []OSVersion {
- if page.ovlr.IsEmpty() {
- return nil
- }
- return *page.ovlr.Value
-}
-
-// Creates a new instance of the OSVersionListResultPage type.
-func NewOSVersionListResultPage(cur OSVersionListResult, getNextPage func(context.Context, OSVersionListResult) (OSVersionListResult, error)) OSVersionListResultPage {
- return OSVersionListResultPage{
- fn: getNextPage,
- ovlr: cur,
- }
-}
-
-// OSVersionProperties OS version properties.
-type OSVersionProperties struct {
- // Family - READ-ONLY; The family of this OS version.
- Family *string `json:"family,omitempty"`
- // FamilyLabel - READ-ONLY; The family label of this OS version.
- FamilyLabel *string `json:"familyLabel,omitempty"`
- // Version - READ-ONLY; The OS version.
- Version *string `json:"version,omitempty"`
- // Label - READ-ONLY; The OS version label.
- Label *string `json:"label,omitempty"`
- // IsDefault - READ-ONLY; Specifies whether this is the default OS version for its family.
- IsDefault *bool `json:"isDefault,omitempty"`
- // IsActive - READ-ONLY; Specifies whether this OS version is active.
- IsActive *bool `json:"isActive,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OSVersionProperties.
-func (ovp OSVersionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OSVersionPropertiesBase configuration view of an OS version.
-type OSVersionPropertiesBase struct {
- // Version - READ-ONLY; The OS version.
- Version *string `json:"version,omitempty"`
- // Label - READ-ONLY; The OS version label.
- Label *string `json:"label,omitempty"`
- // IsDefault - READ-ONLY; Specifies whether this is the default OS version for its family.
- IsDefault *bool `json:"isDefault,omitempty"`
- // IsActive - READ-ONLY; Specifies whether this OS version is active.
- IsActive *bool `json:"isActive,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OSVersionPropertiesBase.
-func (ovpb OSVersionPropertiesBase) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// PatchInstallationDetail information about a specific patch that was encountered during an installation
-// action.
-type PatchInstallationDetail struct {
- // PatchID - READ-ONLY; A unique identifier for the patch.
- PatchID *string `json:"patchId,omitempty"`
- // Name - READ-ONLY; The friendly name of the patch.
- Name *string `json:"name,omitempty"`
- // Version - READ-ONLY; The version string of the package. It may conform to Semantic Versioning. Only applies to Linux.
- Version *string `json:"version,omitempty"`
- // KbID - READ-ONLY; The KBID of the patch. Only applies to Windows patches.
- KbID *string `json:"kbId,omitempty"`
- // Classifications - READ-ONLY; The classification(s) of the patch as provided by the patch publisher.
- Classifications *[]string `json:"classifications,omitempty"`
- // InstallationState - READ-ONLY; The state of the patch after the installation operation completed. Possible values include: 'PatchInstallationStateUnknown', 'PatchInstallationStateInstalled', 'PatchInstallationStateFailed', 'PatchInstallationStateExcluded', 'PatchInstallationStateNotSelected', 'PatchInstallationStatePending'
- InstallationState PatchInstallationState `json:"installationState,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PatchInstallationDetail.
-func (pid PatchInstallationDetail) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// PatchSettings specifies settings related to VM Guest Patching on Windows.
-type PatchSettings struct {
- // PatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale set with OrchestrationMode as Flexible. Possible values are: **Manual** - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the property WindowsConfiguration.enableAutomaticUpdates must be false. **AutomaticByOS** - The virtual machine will automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates must be true. **AutomaticByPlatform** - the virtual machine will automatically updated by the platform. The properties provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must be true. Possible values include: 'WindowsVMGuestPatchModeManual', 'WindowsVMGuestPatchModeAutomaticByOS', 'WindowsVMGuestPatchModeAutomaticByPlatform'
- PatchMode WindowsVMGuestPatchMode `json:"patchMode,omitempty"`
- // EnableHotpatching - Enables customers to patch their Azure VMs without requiring a reboot. For enableHotpatching, the 'provisionVMAgent' must be set to true and 'patchMode' must be set to 'AutomaticByPlatform'.
- EnableHotpatching *bool `json:"enableHotpatching,omitempty"`
- // AssessmentMode - Specifies the mode of VM Guest patch assessment for the IaaS virtual machine. Possible values are: **ImageDefault** - You control the timing of patch assessments on a virtual machine. **AutomaticByPlatform** - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. Possible values include: 'WindowsPatchAssessmentModeImageDefault', 'WindowsPatchAssessmentModeAutomaticByPlatform'
- AssessmentMode WindowsPatchAssessmentMode `json:"assessmentMode,omitempty"`
-}
-
-// PirCommunityGalleryResource base information about the community gallery resource in pir.
-type PirCommunityGalleryResource struct {
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *CommunityGalleryIdentifier `json:"identifier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PirCommunityGalleryResource.
-func (pcgr PirCommunityGalleryResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if pcgr.CommunityGalleryIdentifier != nil {
- objectMap["identifier"] = pcgr.CommunityGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for PirCommunityGalleryResource struct.
-func (pcgr *PirCommunityGalleryResource) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- pcgr.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- pcgr.Location = &location
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- pcgr.Type = &typeVar
- }
- case "identifier":
- if v != nil {
- var communityGalleryIdentifier CommunityGalleryIdentifier
- err = json.Unmarshal(*v, &communityGalleryIdentifier)
- if err != nil {
- return err
- }
- pcgr.CommunityGalleryIdentifier = &communityGalleryIdentifier
- }
- }
- }
-
- return nil
-}
-
-// PirResource the Resource model definition.
-type PirResource struct {
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PirResource.
-func (pr PirResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// PirSharedGalleryResource base information about the shared gallery resource in pir.
-type PirSharedGalleryResource struct {
- *SharedGalleryIdentifier `json:"identifier,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PirSharedGalleryResource.
-func (psgr PirSharedGalleryResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if psgr.SharedGalleryIdentifier != nil {
- objectMap["identifier"] = psgr.SharedGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for PirSharedGalleryResource struct.
-func (psgr *PirSharedGalleryResource) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "identifier":
- if v != nil {
- var sharedGalleryIdentifier SharedGalleryIdentifier
- err = json.Unmarshal(*v, &sharedGalleryIdentifier)
- if err != nil {
- return err
- }
- psgr.SharedGalleryIdentifier = &sharedGalleryIdentifier
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- psgr.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- psgr.Location = &location
- }
- }
- }
-
- return nil
-}
-
-// Plan specifies information about the marketplace image used to create the virtual machine. This element
-// is only used for marketplace images. Before you can use a marketplace image from an API, you must enable
-// the image for programmatic use. In the Azure portal, find the marketplace image that you want to use
-// and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and
-// then click **Save**.
-type Plan struct {
- // Name - The plan ID.
- Name *string `json:"name,omitempty"`
- // Publisher - The publisher ID.
- Publisher *string `json:"publisher,omitempty"`
- // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
- Product *string `json:"product,omitempty"`
- // PromotionCode - The promotion code.
- PromotionCode *string `json:"promotionCode,omitempty"`
-}
-
-// PrivateEndpoint the Private Endpoint resource.
-type PrivateEndpoint struct {
- // ID - READ-ONLY; The ARM identifier for Private Endpoint
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PrivateEndpoint.
-func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// PrivateEndpointConnection the Private Endpoint Connection resource.
-type PrivateEndpointConnection struct {
- autorest.Response `json:"-"`
- // PrivateEndpointConnectionProperties - Resource properties.
- *PrivateEndpointConnectionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; private endpoint connection Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; private endpoint connection name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; private endpoint connection type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PrivateEndpointConnection.
-func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if pec.PrivateEndpointConnectionProperties != nil {
- objectMap["properties"] = pec.PrivateEndpointConnectionProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct.
-func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var privateEndpointConnectionProperties PrivateEndpointConnectionProperties
- err = json.Unmarshal(*v, &privateEndpointConnectionProperties)
- if err != nil {
- return err
- }
- pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- pec.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- pec.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- pec.Type = &typeVar
- }
- }
- }
-
- return nil
-}
-
-// PrivateEndpointConnectionListResult a list of private link resources
-type PrivateEndpointConnectionListResult struct {
- autorest.Response `json:"-"`
- // Value - Array of private endpoint connections
- Value *[]PrivateEndpointConnection `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// PrivateEndpointConnectionListResultIterator provides access to a complete listing of
-// PrivateEndpointConnection values.
-type PrivateEndpointConnectionListResultIterator struct {
- i int
- page PrivateEndpointConnectionListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *PrivateEndpointConnectionListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *PrivateEndpointConnectionListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter PrivateEndpointConnectionListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter PrivateEndpointConnectionListResultIterator) Response() PrivateEndpointConnectionListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter PrivateEndpointConnectionListResultIterator) Value() PrivateEndpointConnection {
- if !iter.page.NotDone() {
- return PrivateEndpointConnection{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the PrivateEndpointConnectionListResultIterator type.
-func NewPrivateEndpointConnectionListResultIterator(page PrivateEndpointConnectionListResultPage) PrivateEndpointConnectionListResultIterator {
- return PrivateEndpointConnectionListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (peclr PrivateEndpointConnectionListResult) IsEmpty() bool {
- return peclr.Value == nil || len(*peclr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (peclr PrivateEndpointConnectionListResult) hasNextLink() bool {
- return peclr.NextLink != nil && len(*peclr.NextLink) != 0
-}
-
-// privateEndpointConnectionListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (peclr PrivateEndpointConnectionListResult) privateEndpointConnectionListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !peclr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(peclr.NextLink)))
-}
-
-// PrivateEndpointConnectionListResultPage contains a page of PrivateEndpointConnection values.
-type PrivateEndpointConnectionListResultPage struct {
- fn func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error)
- peclr PrivateEndpointConnectionListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *PrivateEndpointConnectionListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.peclr)
- if err != nil {
- return err
- }
- page.peclr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *PrivateEndpointConnectionListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page PrivateEndpointConnectionListResultPage) NotDone() bool {
- return !page.peclr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page PrivateEndpointConnectionListResultPage) Response() PrivateEndpointConnectionListResult {
- return page.peclr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page PrivateEndpointConnectionListResultPage) Values() []PrivateEndpointConnection {
- if page.peclr.IsEmpty() {
- return nil
- }
- return *page.peclr.Value
-}
-
-// Creates a new instance of the PrivateEndpointConnectionListResultPage type.
-func NewPrivateEndpointConnectionListResultPage(cur PrivateEndpointConnectionListResult, getNextPage func(context.Context, PrivateEndpointConnectionListResult) (PrivateEndpointConnectionListResult, error)) PrivateEndpointConnectionListResultPage {
- return PrivateEndpointConnectionListResultPage{
- fn: getNextPage,
- peclr: cur,
- }
-}
-
-// PrivateEndpointConnectionProperties properties of the PrivateEndpointConnectProperties.
-type PrivateEndpointConnectionProperties struct {
- // PrivateEndpoint - READ-ONLY; The resource of private end point.
- PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"`
- // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between DiskAccess and Virtual Network.
- PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"`
- // ProvisioningState - The provisioning state of the private endpoint connection resource. Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed'
- ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PrivateEndpointConnectionProperties.
-func (pecp PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if pecp.PrivateLinkServiceConnectionState != nil {
- objectMap["privateLinkServiceConnectionState"] = pecp.PrivateLinkServiceConnectionState
- }
- if pecp.ProvisioningState != "" {
- objectMap["provisioningState"] = pecp.ProvisioningState
- }
- return json.Marshal(objectMap)
-}
-
-// PrivateLinkResource a private link resource
-type PrivateLinkResource struct {
- // PrivateLinkResourceProperties - Resource properties.
- *PrivateLinkResourceProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; private link resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; private link resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; private link resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PrivateLinkResource.
-func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if plr.PrivateLinkResourceProperties != nil {
- objectMap["properties"] = plr.PrivateLinkResourceProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct.
-func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var privateLinkResourceProperties PrivateLinkResourceProperties
- err = json.Unmarshal(*v, &privateLinkResourceProperties)
- if err != nil {
- return err
- }
- plr.PrivateLinkResourceProperties = &privateLinkResourceProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- plr.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- plr.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- plr.Type = &typeVar
- }
- }
- }
-
- return nil
-}
-
-// PrivateLinkResourceListResult a list of private link resources
-type PrivateLinkResourceListResult struct {
- autorest.Response `json:"-"`
- // Value - Array of private link resources
- Value *[]PrivateLinkResource `json:"value,omitempty"`
-}
-
-// PrivateLinkResourceProperties properties of a private link resource.
-type PrivateLinkResourceProperties struct {
- // GroupID - READ-ONLY; The private link resource group id.
- GroupID *string `json:"groupId,omitempty"`
- // RequiredMembers - READ-ONLY; The private link resource required member names.
- RequiredMembers *[]string `json:"requiredMembers,omitempty"`
- // RequiredZoneNames - The private link resource DNS zone name.
- RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties.
-func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if plrp.RequiredZoneNames != nil {
- objectMap["requiredZoneNames"] = plrp.RequiredZoneNames
- }
- return json.Marshal(objectMap)
-}
-
-// PrivateLinkServiceConnectionState a collection of information about the state of the connection between
-// service consumer and provider.
-type PrivateLinkServiceConnectionState struct {
- // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected'
- Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"`
- // Description - The reason for approval/rejection of the connection.
- Description *string `json:"description,omitempty"`
- // ActionsRequired - A message indicating if changes on the service provider require any updates on the consumer.
- ActionsRequired *string `json:"actionsRequired,omitempty"`
-}
-
-// PropertyUpdatesInProgress properties of the disk for which update is pending.
-type PropertyUpdatesInProgress struct {
- // TargetTier - The target performance tier of the disk if a tier change operation is in progress.
- TargetTier *string `json:"targetTier,omitempty"`
-}
-
-// ProximityPlacementGroup specifies information about the proximity placement group.
-type ProximityPlacementGroup struct {
- autorest.Response `json:"-"`
- // ProximityPlacementGroupProperties - Describes the properties of a Proximity Placement Group.
- *ProximityPlacementGroupProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for ProximityPlacementGroup.
-func (ppg ProximityPlacementGroup) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ppg.ProximityPlacementGroupProperties != nil {
- objectMap["properties"] = ppg.ProximityPlacementGroupProperties
- }
- if ppg.Location != nil {
- objectMap["location"] = ppg.Location
- }
- if ppg.Tags != nil {
- objectMap["tags"] = ppg.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for ProximityPlacementGroup struct.
-func (ppg *ProximityPlacementGroup) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var proximityPlacementGroupProperties ProximityPlacementGroupProperties
- err = json.Unmarshal(*v, &proximityPlacementGroupProperties)
- if err != nil {
- return err
- }
- ppg.ProximityPlacementGroupProperties = &proximityPlacementGroupProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- ppg.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- ppg.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- ppg.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- ppg.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- ppg.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// ProximityPlacementGroupListResult the List Proximity Placement Group operation response.
-type ProximityPlacementGroupListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of proximity placement groups
- Value *[]ProximityPlacementGroup `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of proximity placement groups.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// ProximityPlacementGroupListResultIterator provides access to a complete listing of
-// ProximityPlacementGroup values.
-type ProximityPlacementGroupListResultIterator struct {
- i int
- page ProximityPlacementGroupListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *ProximityPlacementGroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *ProximityPlacementGroupListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ProximityPlacementGroupListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter ProximityPlacementGroupListResultIterator) Response() ProximityPlacementGroupListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter ProximityPlacementGroupListResultIterator) Value() ProximityPlacementGroup {
- if !iter.page.NotDone() {
- return ProximityPlacementGroup{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the ProximityPlacementGroupListResultIterator type.
-func NewProximityPlacementGroupListResultIterator(page ProximityPlacementGroupListResultPage) ProximityPlacementGroupListResultIterator {
- return ProximityPlacementGroupListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (ppglr ProximityPlacementGroupListResult) IsEmpty() bool {
- return ppglr.Value == nil || len(*ppglr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (ppglr ProximityPlacementGroupListResult) hasNextLink() bool {
- return ppglr.NextLink != nil && len(*ppglr.NextLink) != 0
-}
-
-// proximityPlacementGroupListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (ppglr ProximityPlacementGroupListResult) proximityPlacementGroupListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !ppglr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(ppglr.NextLink)))
-}
-
-// ProximityPlacementGroupListResultPage contains a page of ProximityPlacementGroup values.
-type ProximityPlacementGroupListResultPage struct {
- fn func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error)
- ppglr ProximityPlacementGroupListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *ProximityPlacementGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.ppglr)
- if err != nil {
- return err
- }
- page.ppglr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *ProximityPlacementGroupListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ProximityPlacementGroupListResultPage) NotDone() bool {
- return !page.ppglr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page ProximityPlacementGroupListResultPage) Response() ProximityPlacementGroupListResult {
- return page.ppglr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page ProximityPlacementGroupListResultPage) Values() []ProximityPlacementGroup {
- if page.ppglr.IsEmpty() {
- return nil
- }
- return *page.ppglr.Value
-}
-
-// Creates a new instance of the ProximityPlacementGroupListResultPage type.
-func NewProximityPlacementGroupListResultPage(cur ProximityPlacementGroupListResult, getNextPage func(context.Context, ProximityPlacementGroupListResult) (ProximityPlacementGroupListResult, error)) ProximityPlacementGroupListResultPage {
- return ProximityPlacementGroupListResultPage{
- fn: getNextPage,
- ppglr: cur,
- }
-}
-
-// ProximityPlacementGroupProperties describes the properties of a Proximity Placement Group.
-type ProximityPlacementGroupProperties struct {
- // ProximityPlacementGroupType - Specifies the type of the proximity placement group. Possible values are: **Standard** : Co-locate resources within an Azure region or Availability Zone. **Ultra** : For future use. Possible values include: 'ProximityPlacementGroupTypeStandard', 'ProximityPlacementGroupTypeUltra'
- ProximityPlacementGroupType ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"`
- // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the proximity placement group.
- VirtualMachines *[]SubResourceWithColocationStatus `json:"virtualMachines,omitempty"`
- // VirtualMachineScaleSets - READ-ONLY; A list of references to all virtual machine scale sets in the proximity placement group.
- VirtualMachineScaleSets *[]SubResourceWithColocationStatus `json:"virtualMachineScaleSets,omitempty"`
- // AvailabilitySets - READ-ONLY; A list of references to all availability sets in the proximity placement group.
- AvailabilitySets *[]SubResourceWithColocationStatus `json:"availabilitySets,omitempty"`
- // ColocationStatus - Describes colocation status of the Proximity Placement Group.
- ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ProximityPlacementGroupProperties.
-func (ppgp ProximityPlacementGroupProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ppgp.ProximityPlacementGroupType != "" {
- objectMap["proximityPlacementGroupType"] = ppgp.ProximityPlacementGroupType
- }
- if ppgp.ColocationStatus != nil {
- objectMap["colocationStatus"] = ppgp.ColocationStatus
- }
- return json.Marshal(objectMap)
-}
-
-// ProximityPlacementGroupUpdate specifies information about the proximity placement group.
-type ProximityPlacementGroupUpdate struct {
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for ProximityPlacementGroupUpdate.
-func (ppgu ProximityPlacementGroupUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ppgu.Tags != nil {
- objectMap["tags"] = ppgu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// ProxyOnlyResource the ProxyOnly Resource model definition.
-type ProxyOnlyResource struct {
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ProxyOnlyResource.
-func (por ProxyOnlyResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ProxyResource the resource model definition for an Azure Resource Manager proxy resource. It will not
-// have tags and a location
-type ProxyResource struct {
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ProxyResource.
-func (pr ProxyResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// PublicIPAddressSku describes the public IP Sku
-type PublicIPAddressSku struct {
- // Name - Specify public IP sku name. Possible values include: 'PublicIPAddressSkuNameBasic', 'PublicIPAddressSkuNameStandard'
- Name PublicIPAddressSkuName `json:"name,omitempty"`
- // Tier - Specify public IP sku tier. Possible values include: 'PublicIPAddressSkuTierRegional', 'PublicIPAddressSkuTierGlobal'
- Tier PublicIPAddressSkuTier `json:"tier,omitempty"`
-}
-
-// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
-type PurchasePlan struct {
- // Publisher - The publisher ID.
- Publisher *string `json:"publisher,omitempty"`
- // Name - The plan ID.
- Name *string `json:"name,omitempty"`
- // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
- Product *string `json:"product,omitempty"`
- // PromotionCode - The Offer Promotion Code.
- PromotionCode *string `json:"promotionCode,omitempty"`
-}
-
-// ReadCloser ...
-type ReadCloser struct {
- autorest.Response `json:"-"`
- Value *io.ReadCloser `json:"value,omitempty"`
-}
-
-// RecommendedMachineConfiguration the properties describe the recommended machine configuration for this
-// Image Definition. These properties are updatable.
-type RecommendedMachineConfiguration struct {
- VCPUs *ResourceRange `json:"vCPUs,omitempty"`
- Memory *ResourceRange `json:"memory,omitempty"`
-}
-
-// RecoveryWalkResponse response after calling a manual recovery walk
-type RecoveryWalkResponse struct {
- autorest.Response `json:"-"`
- // WalkPerformed - READ-ONLY; Whether the recovery walk was performed
- WalkPerformed *bool `json:"walkPerformed,omitempty"`
- // NextPlatformUpdateDomain - READ-ONLY; The next update domain that needs to be walked. Null means walk spanning all update domains has been completed
- NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RecoveryWalkResponse.
-func (rwr RecoveryWalkResponse) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RegionalReplicationStatus this is the regional replication status.
-type RegionalReplicationStatus struct {
- // Region - READ-ONLY; The region to which the gallery image version is being replicated to.
- Region *string `json:"region,omitempty"`
- // State - READ-ONLY; This is the regional replication state. Possible values include: 'ReplicationStateUnknown', 'ReplicationStateReplicating', 'ReplicationStateCompleted', 'ReplicationStateFailed'
- State ReplicationState `json:"state,omitempty"`
- // Details - READ-ONLY; The details of the replication status.
- Details *string `json:"details,omitempty"`
- // Progress - READ-ONLY; It indicates progress of the replication job.
- Progress *int32 `json:"progress,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RegionalReplicationStatus.
-func (rrs RegionalReplicationStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ReplicationStatus this is the replication status of the gallery image version.
-type ReplicationStatus struct {
- // AggregatedState - READ-ONLY; This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'AggregatedReplicationStateUnknown', 'AggregatedReplicationStateInProgress', 'AggregatedReplicationStateCompleted', 'AggregatedReplicationStateFailed'
- AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"`
- // Summary - READ-ONLY; This is a summary of replication status for each region.
- Summary *[]RegionalReplicationStatus `json:"summary,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ReplicationStatus.
-func (rs ReplicationStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api.
-type RequestRateByIntervalInput struct {
- // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'IntervalInMinsThreeMins', 'IntervalInMinsFiveMins', 'IntervalInMinsThirtyMins', 'IntervalInMinsSixtyMins'
- IntervalLength IntervalInMins `json:"intervalLength,omitempty"`
- // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
- BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
- // FromTime - From time of the query
- FromTime *date.Time `json:"fromTime,omitempty"`
- // ToTime - To time of the query
- ToTime *date.Time `json:"toTime,omitempty"`
- // GroupByThrottlePolicy - Group query result by Throttle Policy applied.
- GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
- // GroupByOperationName - Group query result by Operation Name.
- GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
- // GroupByResourceName - Group query result by Resource Name.
- GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
- // GroupByClientApplicationID - Group query result by Client Application ID.
- GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"`
- // GroupByUserAgent - Group query result by User Agent.
- GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"`
-}
-
-// Resource the Resource model definition.
-type Resource struct {
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for Resource.
-func (r Resource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if r.Location != nil {
- objectMap["location"] = r.Location
- }
- if r.Tags != nil {
- objectMap["tags"] = r.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// ResourceInstanceViewStatus instance view status.
-type ResourceInstanceViewStatus struct {
- // Code - READ-ONLY; The status code.
- Code *string `json:"code,omitempty"`
- // DisplayStatus - READ-ONLY; The short localizable label for the status.
- DisplayStatus *string `json:"displayStatus,omitempty"`
- // Message - READ-ONLY; The detailed status message, including for alerts and error messages.
- Message *string `json:"message,omitempty"`
- // Time - READ-ONLY; The time of the status.
- Time *date.Time `json:"time,omitempty"`
- // Level - The level code. Possible values include: 'StatusLevelTypesInfo', 'StatusLevelTypesWarning', 'StatusLevelTypesError'
- Level StatusLevelTypes `json:"level,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceInstanceViewStatus.
-func (rivs ResourceInstanceViewStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rivs.Level != "" {
- objectMap["level"] = rivs.Level
- }
- return json.Marshal(objectMap)
-}
-
-// ResourceRange describes the resource range.
-type ResourceRange struct {
- // Min - The minimum number of the resource.
- Min *int32 `json:"min,omitempty"`
- // Max - The maximum number of the resource.
- Max *int32 `json:"max,omitempty"`
-}
-
-// ResourceSku describes an available Compute SKU.
-type ResourceSku struct {
- // ResourceType - READ-ONLY; The type of resource the SKU applies to.
- ResourceType *string `json:"resourceType,omitempty"`
- // Name - READ-ONLY; The name of SKU.
- Name *string `json:"name,omitempty"`
- // Tier - READ-ONLY; Specifies the tier of virtual machines in a scale set. Possible Values: **Standard**, **Basic**
- Tier *string `json:"tier,omitempty"`
- // Size - READ-ONLY; The Size of the SKU.
- Size *string `json:"size,omitempty"`
- // Family - READ-ONLY; The Family of this particular SKU.
- Family *string `json:"family,omitempty"`
- // Kind - READ-ONLY; The Kind of resources that are supported in this SKU.
- Kind *string `json:"kind,omitempty"`
- // Capacity - READ-ONLY; Specifies the number of virtual machines in the scale set.
- Capacity *ResourceSkuCapacity `json:"capacity,omitempty"`
- // Locations - READ-ONLY; The set of locations that the SKU is available.
- Locations *[]string `json:"locations,omitempty"`
- // LocationInfo - READ-ONLY; A list of locations and availability zones in those locations where the SKU is available.
- LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"`
- // APIVersions - READ-ONLY; The api versions that support this SKU.
- APIVersions *[]string `json:"apiVersions,omitempty"`
- // Costs - READ-ONLY; Metadata for retrieving price info.
- Costs *[]ResourceSkuCosts `json:"costs,omitempty"`
- // Capabilities - READ-ONLY; A name value pair to describe the capability.
- Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"`
- // Restrictions - READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions.
- Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSku.
-func (rs ResourceSku) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuCapabilities describes The SKU capabilities object.
-type ResourceSkuCapabilities struct {
- // Name - READ-ONLY; An invariant to describe the feature.
- Name *string `json:"name,omitempty"`
- // Value - READ-ONLY; An invariant if the feature is measured by quantity.
- Value *string `json:"value,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuCapabilities.
-func (rsc ResourceSkuCapabilities) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuCapacity describes scaling information of a SKU.
-type ResourceSkuCapacity struct {
- // Minimum - READ-ONLY; The minimum capacity.
- Minimum *int64 `json:"minimum,omitempty"`
- // Maximum - READ-ONLY; The maximum capacity that can be set.
- Maximum *int64 `json:"maximum,omitempty"`
- // Default - READ-ONLY; The default capacity.
- Default *int64 `json:"default,omitempty"`
- // ScaleType - READ-ONLY; The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone'
- ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuCapacity.
-func (rsc ResourceSkuCapacity) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuCosts describes metadata for retrieving price info.
-type ResourceSkuCosts struct {
- // MeterID - READ-ONLY; Used for querying price from commerce.
- MeterID *string `json:"meterID,omitempty"`
- // Quantity - READ-ONLY; The multiplier is needed to extend the base metered cost.
- Quantity *int64 `json:"quantity,omitempty"`
- // ExtendedUnit - READ-ONLY; An invariant to show the extended unit.
- ExtendedUnit *string `json:"extendedUnit,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuCosts.
-func (rsc ResourceSkuCosts) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuLocationInfo describes an available Compute SKU Location Information.
-type ResourceSkuLocationInfo struct {
- // Location - READ-ONLY; Location of the SKU
- Location *string `json:"location,omitempty"`
- // Zones - READ-ONLY; List of availability zones where the SKU is supported.
- Zones *[]string `json:"zones,omitempty"`
- // ZoneDetails - READ-ONLY; Details of capabilities available to a SKU in specific zones.
- ZoneDetails *[]ResourceSkuZoneDetails `json:"zoneDetails,omitempty"`
- // ExtendedLocations - READ-ONLY; The names of extended locations.
- ExtendedLocations *[]string `json:"extendedLocations,omitempty"`
- // Type - READ-ONLY; The type of the extended location. Possible values include: 'ExtendedLocationTypeEdgeZone'
- Type ExtendedLocationType `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuLocationInfo.
-func (rsli ResourceSkuLocationInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuRestrictionInfo describes an available Compute SKU Restriction Information.
-type ResourceSkuRestrictionInfo struct {
- // Locations - READ-ONLY; Locations where the SKU is restricted
- Locations *[]string `json:"locations,omitempty"`
- // Zones - READ-ONLY; List of availability zones where the SKU is restricted.
- Zones *[]string `json:"zones,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuRestrictionInfo.
-func (rsri ResourceSkuRestrictionInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkuRestrictions describes scaling information of a SKU.
-type ResourceSkuRestrictions struct {
- // Type - READ-ONLY; The type of restrictions. Possible values include: 'ResourceSkuRestrictionsTypeLocation', 'ResourceSkuRestrictionsTypeZone'
- Type ResourceSkuRestrictionsType `json:"type,omitempty"`
- // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted.
- Values *[]string `json:"values,omitempty"`
- // RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used.
- RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"`
- // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'ResourceSkuRestrictionsReasonCodeQuotaID', 'ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription'
- ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuRestrictions.
-func (rsr ResourceSkuRestrictions) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceSkusResult the List Resource Skus operation response.
-type ResourceSkusResult struct {
- autorest.Response `json:"-"`
- // Value - The list of skus available for the subscription.
- Value *[]ResourceSku `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of Resource Skus. Call ListNext() with this URI to fetch the next page of Resource Skus
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values.
-type ResourceSkusResultIterator struct {
- i int
- page ResourceSkusResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *ResourceSkusResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *ResourceSkusResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ResourceSkusResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter ResourceSkusResultIterator) Response() ResourceSkusResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter ResourceSkusResultIterator) Value() ResourceSku {
- if !iter.page.NotDone() {
- return ResourceSku{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the ResourceSkusResultIterator type.
-func NewResourceSkusResultIterator(page ResourceSkusResultPage) ResourceSkusResultIterator {
- return ResourceSkusResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (rsr ResourceSkusResult) IsEmpty() bool {
- return rsr.Value == nil || len(*rsr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (rsr ResourceSkusResult) hasNextLink() bool {
- return rsr.NextLink != nil && len(*rsr.NextLink) != 0
-}
-
-// resourceSkusResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (rsr ResourceSkusResult) resourceSkusResultPreparer(ctx context.Context) (*http.Request, error) {
- if !rsr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(rsr.NextLink)))
-}
-
-// ResourceSkusResultPage contains a page of ResourceSku values.
-type ResourceSkusResultPage struct {
- fn func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)
- rsr ResourceSkusResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.rsr)
- if err != nil {
- return err
- }
- page.rsr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *ResourceSkusResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ResourceSkusResultPage) NotDone() bool {
- return !page.rsr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page ResourceSkusResultPage) Response() ResourceSkusResult {
- return page.rsr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page ResourceSkusResultPage) Values() []ResourceSku {
- if page.rsr.IsEmpty() {
- return nil
- }
- return *page.rsr.Value
-}
-
-// Creates a new instance of the ResourceSkusResultPage type.
-func NewResourceSkusResultPage(cur ResourceSkusResult, getNextPage func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)) ResourceSkusResultPage {
- return ResourceSkusResultPage{
- fn: getNextPage,
- rsr: cur,
- }
-}
-
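For orientation, the hunk above removes one instance of the AutoRest pager scaffolding (a Result/Page/Iterator triple) that this generated file repeats for every list API. A minimal sketch of how such an iterator is typically driven, using only the constructors and methods deleted above; the `compute` import path and the `fetchNext` closure are assumptions standing in for what the generated client normally supplies:

```go
// Editorial sketch, not part of the diff. Assumes the vendored track-1 compute package;
// the exact API-version directory in this repository's vendor tree may differ.
package example

import (
	"context"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" // assumed path
)

// collectSkus drains every page by walking the iterator. fetchNext stands in for the
// nextLink-following closure the generated client passes to NewResourceSkusResultPage.
func collectSkus(ctx context.Context, first compute.ResourceSkusResult,
	fetchNext func(context.Context, compute.ResourceSkusResult) (compute.ResourceSkusResult, error)) ([]compute.ResourceSku, error) {
	iter := compute.NewResourceSkusResultIterator(compute.NewResourceSkusResultPage(first, fetchNext))
	var skus []compute.ResourceSku
	for iter.NotDone() {
		skus = append(skus, iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return skus, err
		}
	}
	return skus, nil
}
```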
-// ResourceSkuZoneDetails describes The zonal capabilities of a SKU.
-type ResourceSkuZoneDetails struct {
- // Name - READ-ONLY; The set of zones that the SKU is available in with the specified capabilities.
- Name *[]string `json:"name,omitempty"`
- // Capabilities - READ-ONLY; A list of capabilities that are available for the SKU in the specified list of zones.
- Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ResourceSkuZoneDetails.
-func (rszd ResourceSkuZoneDetails) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// ResourceURIList the List resources which are encrypted with the disk encryption set.
-type ResourceURIList struct {
- autorest.Response `json:"-"`
- // Value - A list of IDs or Owner IDs of resources which are encrypted with the disk encryption set.
- Value *[]string `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of encrypted resources. Call ListNext() with this to fetch the next page of encrypted resources.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// ResourceURIListIterator provides access to a complete listing of string values.
-type ResourceURIListIterator struct {
- i int
- page ResourceURIListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *ResourceURIListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *ResourceURIListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ResourceURIListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter ResourceURIListIterator) Response() ResourceURIList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter ResourceURIListIterator) Value() string {
- if !iter.page.NotDone() {
- return ""
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the ResourceURIListIterator type.
-func NewResourceURIListIterator(page ResourceURIListPage) ResourceURIListIterator {
- return ResourceURIListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (rul ResourceURIList) IsEmpty() bool {
- return rul.Value == nil || len(*rul.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (rul ResourceURIList) hasNextLink() bool {
- return rul.NextLink != nil && len(*rul.NextLink) != 0
-}
-
-// resourceURIListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (rul ResourceURIList) resourceURIListPreparer(ctx context.Context) (*http.Request, error) {
- if !rul.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(rul.NextLink)))
-}
-
-// ResourceURIListPage contains a page of string values.
-type ResourceURIListPage struct {
- fn func(context.Context, ResourceURIList) (ResourceURIList, error)
- rul ResourceURIList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *ResourceURIListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceURIListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.rul)
- if err != nil {
- return err
- }
- page.rul = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *ResourceURIListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ResourceURIListPage) NotDone() bool {
- return !page.rul.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page ResourceURIListPage) Response() ResourceURIList {
- return page.rul
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page ResourceURIListPage) Values() []string {
- if page.rul.IsEmpty() {
- return nil
- }
- return *page.rul.Value
-}
-
-// Creates a new instance of the ResourceURIListPage type.
-func NewResourceURIListPage(cur ResourceURIList, getNextPage func(context.Context, ResourceURIList) (ResourceURIList, error)) ResourceURIListPage {
- return ResourceURIListPage{
- fn: getNextPage,
- rul: cur,
- }
-}
-
-// RestorePoint restore Point details.
-type RestorePoint struct {
- autorest.Response `json:"-"`
- *RestorePointProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePoint.
-func (rp RestorePoint) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rp.RestorePointProperties != nil {
- objectMap["properties"] = rp.RestorePointProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for RestorePoint struct.
-func (rp *RestorePoint) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var restorePointProperties RestorePointProperties
- err = json.Unmarshal(*v, &restorePointProperties)
- if err != nil {
- return err
- }
- rp.RestorePointProperties = &restorePointProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- rp.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- rp.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- rp.Type = &typeVar
- }
- }
- }
-
- return nil
-}
-
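The MarshalJSON/UnmarshalJSON pair above encodes the track-1 convention for READ-ONLY fields: they are populated from service responses but are never echoed back into request bodies. A small, hedged illustration of that asymmetry, assuming the standard `encoding/json` and `fmt` packages and an illustrative resource id (error handling elided):

```go
// Editorial sketch: ID survives unmarshalling but is dropped again when marshalling,
// because RestorePoint.MarshalJSON only copies the writable "properties" key.
func roundTripRestorePoint() {
	var rp compute.RestorePoint
	_ = json.Unmarshal([]byte(`{"id":"/subscriptions/sub/restorePoints/rp1","properties":{}}`), &rp)
	fmt.Println(*rp.ID) // /subscriptions/sub/restorePoints/rp1
	body, _ := json.Marshal(rp)
	fmt.Println(string(body)) // {"properties":{}}
}
```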
-// RestorePointCollection create or update Restore Point collection parameters.
-type RestorePointCollection struct {
- autorest.Response `json:"-"`
- *RestorePointCollectionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePointCollection.
-func (RPCVar RestorePointCollection) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if RPCVar.RestorePointCollectionProperties != nil {
- objectMap["properties"] = RPCVar.RestorePointCollectionProperties
- }
- if RPCVar.Location != nil {
- objectMap["location"] = RPCVar.Location
- }
- if RPCVar.Tags != nil {
- objectMap["tags"] = RPCVar.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for RestorePointCollection struct.
-func (RPCVar *RestorePointCollection) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var restorePointCollectionProperties RestorePointCollectionProperties
- err = json.Unmarshal(*v, &restorePointCollectionProperties)
- if err != nil {
- return err
- }
- RPCVar.RestorePointCollectionProperties = &restorePointCollectionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- RPCVar.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- RPCVar.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- RPCVar.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- RPCVar.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- RPCVar.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// RestorePointCollectionListResult the List restore point collection operation response.
-type RestorePointCollectionListResult struct {
- autorest.Response `json:"-"`
- // Value - Gets the list of restore point collections.
- Value *[]RestorePointCollection `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of RestorePointCollections. Call ListNext() with this to fetch the next page of RestorePointCollections
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// RestorePointCollectionListResultIterator provides access to a complete listing of RestorePointCollection
-// values.
-type RestorePointCollectionListResultIterator struct {
- i int
- page RestorePointCollectionListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *RestorePointCollectionListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *RestorePointCollectionListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter RestorePointCollectionListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter RestorePointCollectionListResultIterator) Response() RestorePointCollectionListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter RestorePointCollectionListResultIterator) Value() RestorePointCollection {
- if !iter.page.NotDone() {
- return RestorePointCollection{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the RestorePointCollectionListResultIterator type.
-func NewRestorePointCollectionListResultIterator(page RestorePointCollectionListResultPage) RestorePointCollectionListResultIterator {
- return RestorePointCollectionListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (rpclr RestorePointCollectionListResult) IsEmpty() bool {
- return rpclr.Value == nil || len(*rpclr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (rpclr RestorePointCollectionListResult) hasNextLink() bool {
- return rpclr.NextLink != nil && len(*rpclr.NextLink) != 0
-}
-
-// restorePointCollectionListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (rpclr RestorePointCollectionListResult) restorePointCollectionListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !rpclr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(rpclr.NextLink)))
-}
-
-// RestorePointCollectionListResultPage contains a page of RestorePointCollection values.
-type RestorePointCollectionListResultPage struct {
- fn func(context.Context, RestorePointCollectionListResult) (RestorePointCollectionListResult, error)
- rpclr RestorePointCollectionListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *RestorePointCollectionListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.rpclr)
- if err != nil {
- return err
- }
- page.rpclr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *RestorePointCollectionListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page RestorePointCollectionListResultPage) NotDone() bool {
- return !page.rpclr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page RestorePointCollectionListResultPage) Response() RestorePointCollectionListResult {
- return page.rpclr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page RestorePointCollectionListResultPage) Values() []RestorePointCollection {
- if page.rpclr.IsEmpty() {
- return nil
- }
- return *page.rpclr.Value
-}
-
-// Creates a new instance of the RestorePointCollectionListResultPage type.
-func NewRestorePointCollectionListResultPage(cur RestorePointCollectionListResult, getNextPage func(context.Context, RestorePointCollectionListResult) (RestorePointCollectionListResult, error)) RestorePointCollectionListResultPage {
- return RestorePointCollectionListResultPage{
- fn: getNextPage,
- rpclr: cur,
- }
-}
-
-// RestorePointCollectionProperties the restore point collection properties.
-type RestorePointCollectionProperties struct {
- Source *RestorePointCollectionSourceProperties `json:"source,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state of the restore point collection.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // RestorePointCollectionID - READ-ONLY; The unique id of the restore point collection.
- RestorePointCollectionID *string `json:"restorePointCollectionId,omitempty"`
- // RestorePoints - READ-ONLY; A list containing all restore points created under this restore point collection.
- RestorePoints *[]RestorePoint `json:"restorePoints,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePointCollectionProperties.
-func (rpcp RestorePointCollectionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rpcp.Source != nil {
- objectMap["source"] = rpcp.Source
- }
- return json.Marshal(objectMap)
-}
-
-// RestorePointCollectionsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type RestorePointCollectionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(RestorePointCollectionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *RestorePointCollectionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for RestorePointCollectionsDeleteFuture.Result.
-func (future *RestorePointCollectionsDeleteFuture) result(client RestorePointCollectionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.RestorePointCollectionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
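RestorePointCollectionsDeleteFuture follows the generated long-running-operation pattern: UnmarshalJSON rehydrates the embedded azure.Future and re-wires Result to the package-private result method above. A hedged sketch of the caller-side flow; WaitForCompletionRef and the embedded autorest.Client reached via client.Client come from go-autorest and are assumptions not shown in this hunk:

```go
// Editorial sketch: block until the delete operation is terminal, then fetch its outcome.
func awaitCollectionDelete(ctx context.Context, client compute.RestorePointCollectionsClient,
	future compute.RestorePointCollectionsDeleteFuture) (autorest.Response, error) {
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return autorest.Response{}, err
	}
	// Result was bound to the future's result() implementation when it was created or unmarshalled.
	return future.Result(client)
}
```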
-// RestorePointCollectionSourceProperties the properties of the source resource that this restore point
-// collection is created from.
-type RestorePointCollectionSourceProperties struct {
- // Location - READ-ONLY; Location of the source resource used to create this restore point collection.
- Location *string `json:"location,omitempty"`
- // ID - Resource Id of the source resource used to create this restore point collection
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePointCollectionSourceProperties.
-func (rpcsp RestorePointCollectionSourceProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rpcsp.ID != nil {
- objectMap["id"] = rpcsp.ID
- }
- return json.Marshal(objectMap)
-}
-
-// RestorePointCollectionUpdate update Restore Point collection parameters.
-type RestorePointCollectionUpdate struct {
- *RestorePointCollectionProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePointCollectionUpdate.
-func (rpcu RestorePointCollectionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rpcu.RestorePointCollectionProperties != nil {
- objectMap["properties"] = rpcu.RestorePointCollectionProperties
- }
- if rpcu.Tags != nil {
- objectMap["tags"] = rpcu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for RestorePointCollectionUpdate struct.
-func (rpcu *RestorePointCollectionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var restorePointCollectionProperties RestorePointCollectionProperties
- err = json.Unmarshal(*v, &restorePointCollectionProperties)
- if err != nil {
- return err
- }
- rpcu.RestorePointCollectionProperties = &restorePointCollectionProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- rpcu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// RestorePointProperties the restore point properties.
-type RestorePointProperties struct {
- // ExcludeDisks - List of disk resource ids that the customer wishes to exclude from the restore point. If no disks are specified, all disks will be included.
- ExcludeDisks *[]APIEntityReference `json:"excludeDisks,omitempty"`
- // SourceMetadata - READ-ONLY; Gets the details of the VM captured at the time of the restore point creation.
- SourceMetadata *RestorePointSourceMetadata `json:"sourceMetadata,omitempty"`
- // ProvisioningState - READ-ONLY; Gets the provisioning state of the restore point.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // ConsistencyMode - READ-ONLY; Gets the consistency mode for the restore point. Please refer to https://aka.ms/RestorePoints for more details. Possible values include: 'ConsistencyModeTypesCrashConsistent', 'ConsistencyModeTypesFileSystemConsistent', 'ConsistencyModeTypesApplicationConsistent'
- ConsistencyMode ConsistencyModeTypes `json:"consistencyMode,omitempty"`
- // ProvisioningDetails - READ-ONLY; Gets the provisioning details set by the server during Create restore point operation.
- ProvisioningDetails *RestorePointProvisioningDetails `json:"provisioningDetails,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RestorePointProperties.
-func (rpp RestorePointProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rpp.ExcludeDisks != nil {
- objectMap["excludeDisks"] = rpp.ExcludeDisks
- }
- return json.Marshal(objectMap)
-}
-
-// RestorePointProvisioningDetails restore Point Provisioning details.
-type RestorePointProvisioningDetails struct {
- // CreationTime - Gets the creation time of the restore point.
- CreationTime *date.Time `json:"creationTime,omitempty"`
- // TotalUsedSizeInBytes - Gets the total size of the data in all the disks which are part of the restore point.
- TotalUsedSizeInBytes *int64 `json:"totalUsedSizeInBytes,omitempty"`
- // StatusCode - Gets the status of the Create restore point operation.
- StatusCode *int32 `json:"statusCode,omitempty"`
- // StatusMessage - Gets the status message of the Create restore point operation.
- StatusMessage *string `json:"statusMessage,omitempty"`
-}
-
-// RestorePointsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type RestorePointsCreateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(RestorePointsClient) (RestorePoint, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *RestorePointsCreateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for RestorePointsCreateFuture.Result.
-func (future *RestorePointsCreateFuture) result(client RestorePointsClient) (rp RestorePoint, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsCreateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- rp.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.RestorePointsCreateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if rp.Response.Response, err = future.GetResult(sender); err == nil && rp.Response.Response.StatusCode != http.StatusNoContent {
- rp, err = client.CreateResponder(rp.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsCreateFuture", "Result", rp.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// RestorePointsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type RestorePointsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(RestorePointsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *RestorePointsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for RestorePointsDeleteFuture.Result.
-func (future *RestorePointsDeleteFuture) result(client RestorePointsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.RestorePointsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// RestorePointSourceMetadata describes the properties of the Virtual Machine for which the restore point
-// was created. The properties provided are a subset and the snapshot of the overall Virtual Machine
-// properties captured at the time of the restore point creation.
-type RestorePointSourceMetadata struct {
- // HardwareProfile - Gets the hardware profile.
- HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
- // StorageProfile - Gets the storage profile.
- StorageProfile *RestorePointSourceVMStorageProfile `json:"storageProfile,omitempty"`
- // OsProfile - Gets the OS profile.
- OsProfile *OSProfile `json:"osProfile,omitempty"`
- // DiagnosticsProfile - Gets the diagnostics profile.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // LicenseType - Gets the license type, which is for bring your own license scenario.
- LicenseType *string `json:"licenseType,omitempty"`
- // VMID - Gets the virtual machine unique id.
- VMID *string `json:"vmId,omitempty"`
- // SecurityProfile - Gets the security profile.
- SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
- // Location - Location of the VM from which the restore point was created.
- Location *string `json:"location,omitempty"`
-}
-
-// RestorePointSourceVMDataDisk describes a data disk.
-type RestorePointSourceVMDataDisk struct {
- // Lun - Gets the logical unit number.
- Lun *int32 `json:"lun,omitempty"`
- // Name - Gets the disk name.
- Name *string `json:"name,omitempty"`
- // Caching - Gets the caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // DiskSizeGB - Gets the initial disk size in GB for blank data disks, and the new desired size for existing OS and Data disks.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // ManagedDisk - Gets the managed disk details
- ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
- // DiskRestorePoint - Gets the disk restore point Id.
- DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"`
-}
-
-// RestorePointSourceVMOSDisk describes an Operating System disk.
-type RestorePointSourceVMOSDisk struct {
- // OsType - Gets the Operating System type. Possible values include: 'OperatingSystemTypeWindows', 'OperatingSystemTypeLinux'
- OsType OperatingSystemType `json:"osType,omitempty"`
- // EncryptionSettings - Gets the disk encryption settings.
- EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
- // Name - Gets the disk name.
- Name *string `json:"name,omitempty"`
- // Caching - Gets the caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // DiskSizeGB - Gets the disk size in GB.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // ManagedDisk - Gets the managed disk details
- ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
- // DiskRestorePoint - Gets the disk restore point Id.
- DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"`
-}
-
-// RestorePointSourceVMStorageProfile describes the storage profile.
-type RestorePointSourceVMStorageProfile struct {
- // OsDisk - Gets the OS disk of the VM captured at the time of the restore point creation.
- OsDisk *RestorePointSourceVMOSDisk `json:"osDisk,omitempty"`
- // DataDisks - Gets the data disks of the VM captured at the time of the restore point creation.
- DataDisks *[]RestorePointSourceVMDataDisk `json:"dataDisks,omitempty"`
-}
-
-// RetrieveBootDiagnosticsDataResult the SAS URIs of the console screenshot and serial log blobs.
-type RetrieveBootDiagnosticsDataResult struct {
- autorest.Response `json:"-"`
- // ConsoleScreenshotBlobURI - READ-ONLY; The console screenshot blob URI
- ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"`
- // SerialConsoleLogBlobURI - READ-ONLY; The serial console log blob URI.
- SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RetrieveBootDiagnosticsDataResult.
-func (rbddr RetrieveBootDiagnosticsDataResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RoleInstance ...
-type RoleInstance struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource Name.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource Type.
- Type *string `json:"type,omitempty"`
- // Location - READ-ONLY; Resource Location.
- Location *string `json:"location,omitempty"`
- // Tags - READ-ONLY; Resource tags.
- Tags map[string]*string `json:"tags"`
- Sku *InstanceSku `json:"sku,omitempty"`
- Properties *RoleInstanceProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RoleInstance.
-func (ri RoleInstance) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ri.Sku != nil {
- objectMap["sku"] = ri.Sku
- }
- if ri.Properties != nil {
- objectMap["properties"] = ri.Properties
- }
- return json.Marshal(objectMap)
-}
-
-// RoleInstanceInstanceView the instance view of the role instance.
-type RoleInstanceInstanceView struct {
- autorest.Response `json:"-"`
- // PlatformUpdateDomain - READ-ONLY; The Update Domain.
- PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"`
- // PlatformFaultDomain - READ-ONLY; The Fault Domain.
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // PrivateID - READ-ONLY; Specifies a unique identifier generated internally for the cloud service associated with this role instance. NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details.
- PrivateID *string `json:"privateId,omitempty"`
- // Statuses - READ-ONLY
- Statuses *[]ResourceInstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RoleInstanceInstanceView.
-func (riiv RoleInstanceInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RoleInstanceListResult ...
-type RoleInstanceListResult struct {
- autorest.Response `json:"-"`
- Value *[]RoleInstance `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// RoleInstanceListResultIterator provides access to a complete listing of RoleInstance values.
-type RoleInstanceListResultIterator struct {
- i int
- page RoleInstanceListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *RoleInstanceListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RoleInstanceListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *RoleInstanceListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter RoleInstanceListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter RoleInstanceListResultIterator) Response() RoleInstanceListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter RoleInstanceListResultIterator) Value() RoleInstance {
- if !iter.page.NotDone() {
- return RoleInstance{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the RoleInstanceListResultIterator type.
-func NewRoleInstanceListResultIterator(page RoleInstanceListResultPage) RoleInstanceListResultIterator {
- return RoleInstanceListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (rilr RoleInstanceListResult) IsEmpty() bool {
- return rilr.Value == nil || len(*rilr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (rilr RoleInstanceListResult) hasNextLink() bool {
- return rilr.NextLink != nil && len(*rilr.NextLink) != 0
-}
-
-// roleInstanceListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (rilr RoleInstanceListResult) roleInstanceListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !rilr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(rilr.NextLink)))
-}
-
-// RoleInstanceListResultPage contains a page of RoleInstance values.
-type RoleInstanceListResultPage struct {
- fn func(context.Context, RoleInstanceListResult) (RoleInstanceListResult, error)
- rilr RoleInstanceListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *RoleInstanceListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RoleInstanceListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.rilr)
- if err != nil {
- return err
- }
- page.rilr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *RoleInstanceListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page RoleInstanceListResultPage) NotDone() bool {
- return !page.rilr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page RoleInstanceListResultPage) Response() RoleInstanceListResult {
- return page.rilr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page RoleInstanceListResultPage) Values() []RoleInstance {
- if page.rilr.IsEmpty() {
- return nil
- }
- return *page.rilr.Value
-}
-
-// Creates a new instance of the RoleInstanceListResultPage type.
-func NewRoleInstanceListResultPage(cur RoleInstanceListResult, getNextPage func(context.Context, RoleInstanceListResult) (RoleInstanceListResult, error)) RoleInstanceListResultPage {
- return RoleInstanceListResultPage{
- fn: getNextPage,
- rilr: cur,
- }
-}
-
-// RoleInstanceNetworkProfile describes the network profile for the role instance.
-type RoleInstanceNetworkProfile struct {
- // NetworkInterfaces - READ-ONLY; Specifies the list of resource Ids for the network interfaces associated with the role instance.
- NetworkInterfaces *[]SubResource `json:"networkInterfaces,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RoleInstanceNetworkProfile.
-func (rinp RoleInstanceNetworkProfile) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RoleInstanceProperties ...
-type RoleInstanceProperties struct {
- NetworkProfile *RoleInstanceNetworkProfile `json:"networkProfile,omitempty"`
- InstanceView *RoleInstanceInstanceView `json:"instanceView,omitempty"`
-}
-
-// RoleInstances specifies a list of role instances from the cloud service.
-type RoleInstances struct {
- // RoleInstances - List of cloud service role instance names. Value of '*' will signify all role instances of the cloud service.
- RoleInstances *[]string `json:"roleInstances,omitempty"`
-}
-
-// RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation.
-type RollbackStatusInfo struct {
- // SuccessfullyRolledbackInstanceCount - READ-ONLY; The number of instances which have been successfully rolled back.
- SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty"`
- // FailedRolledbackInstanceCount - READ-ONLY; The number of instances which failed to rollback.
- FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty"`
- // RollbackError - READ-ONLY; Error details if OS rollback failed.
- RollbackError *APIError `json:"rollbackError,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RollbackStatusInfo.
-func (rsi RollbackStatusInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade.
-type RollingUpgradePolicy struct {
- // MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%.
- MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"`
- // MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%.
- MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"`
- // MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%.
- MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"`
- // PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S).
- PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"`
- // EnableCrossZoneUpgrade - Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size.
- EnableCrossZoneUpgrade *bool `json:"enableCrossZoneUpgrade,omitempty"`
- // PrioritizeUnhealthyInstances - Upgrade all unhealthy instances in a scale set before any healthy instances.
- PrioritizeUnhealthyInstances *bool `json:"prioritizeUnhealthyInstances,omitempty"`
-}
-
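The RollingUpgradePolicy fields removed above are all pointers, so any knob left unset falls back to the documented defaults (20% batch/unhealthy thresholds, PT0S pause). A minimal construction sketch; the to.* pointer helpers come from github.com/Azure/go-autorest/autorest/to, which this file already imports:

```go
// Editorial sketch: a conservative rolling-upgrade policy with every knob set explicitly.
func exampleRollingUpgradePolicy() compute.RollingUpgradePolicy {
	return compute.RollingUpgradePolicy{
		MaxBatchInstancePercent:             to.Int32Ptr(20),
		MaxUnhealthyInstancePercent:         to.Int32Ptr(20),
		MaxUnhealthyUpgradedInstancePercent: to.Int32Ptr(20),
		PauseTimeBetweenBatches:             to.StringPtr("PT30S"), // ISO 8601 duration
		EnableCrossZoneUpgrade:              to.BoolPtr(false),
		PrioritizeUnhealthyInstances:        to.BoolPtr(true),
	}
}
```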
-// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade
-// state.
-type RollingUpgradeProgressInfo struct {
- // SuccessfulInstanceCount - READ-ONLY; The number of instances that have been successfully upgraded.
- SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"`
- // FailedInstanceCount - READ-ONLY; The number of instances that have failed to be upgraded successfully.
- FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"`
- // InProgressInstanceCount - READ-ONLY; The number of instances that are currently being upgraded.
- InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"`
- // PendingInstanceCount - READ-ONLY; The number of instances that have not yet begun to be upgraded.
- PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RollingUpgradeProgressInfo.
-func (rupi RollingUpgradeProgressInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RollingUpgradeRunningStatus information about the current running state of the overall upgrade.
-type RollingUpgradeRunningStatus struct {
- // Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted'
- Code RollingUpgradeStatusCode `json:"code,omitempty"`
- // StartTime - READ-ONLY; Start time of the upgrade.
- StartTime *date.Time `json:"startTime,omitempty"`
- // LastAction - READ-ONLY; The last action performed on the rolling upgrade. Possible values include: 'RollingUpgradeActionTypeStart', 'RollingUpgradeActionTypeCancel'
- LastAction RollingUpgradeActionType `json:"lastAction,omitempty"`
- // LastActionTime - READ-ONLY; Last action time of the upgrade.
- LastActionTime *date.Time `json:"lastActionTime,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RollingUpgradeRunningStatus.
-func (rurs RollingUpgradeRunningStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade.
-type RollingUpgradeStatusInfo struct {
- autorest.Response `json:"-"`
- *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfo.
-func (rusi RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if rusi.RollingUpgradeStatusInfoProperties != nil {
- objectMap["properties"] = rusi.RollingUpgradeStatusInfoProperties
- }
- if rusi.Location != nil {
- objectMap["location"] = rusi.Location
- }
- if rusi.Tags != nil {
- objectMap["tags"] = rusi.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for RollingUpgradeStatusInfo struct.
-func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var rollingUpgradeStatusInfoProperties RollingUpgradeStatusInfoProperties
- err = json.Unmarshal(*v, &rollingUpgradeStatusInfoProperties)
- if err != nil {
- return err
- }
- rusi.RollingUpgradeStatusInfoProperties = &rollingUpgradeStatusInfoProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- rusi.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- rusi.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- rusi.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- rusi.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- rusi.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade.
-type RollingUpgradeStatusInfoProperties struct {
- // Policy - READ-ONLY; The rolling upgrade policies applied for this upgrade.
- Policy *RollingUpgradePolicy `json:"policy,omitempty"`
- // RunningStatus - READ-ONLY; Information about the current running state of the overall upgrade.
- RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"`
- // Progress - READ-ONLY; Information about the number of virtual machine instances in each upgrade state.
- Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"`
- // Error - READ-ONLY; Error details for this upgrade, if there are any.
- Error *APIError `json:"error,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfoProperties.
-func (rusip RollingUpgradeStatusInfoProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// RunCommandDocument describes the properties of a Run Command.
-type RunCommandDocument struct {
- autorest.Response `json:"-"`
- // Script - The script to be executed.
- Script *[]string `json:"script,omitempty"`
- // Parameters - The parameters used by the script.
- Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"`
- // Schema - The VM run command schema.
- Schema *string `json:"$schema,omitempty"`
- // ID - The VM run command id.
- ID *string `json:"id,omitempty"`
- // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // Label - The VM run command label.
- Label *string `json:"label,omitempty"`
- // Description - The VM run command description.
- Description *string `json:"description,omitempty"`
-}
-
-// RunCommandDocumentBase describes the properties of a Run Command metadata.
-type RunCommandDocumentBase struct {
- // Schema - The VM run command schema.
- Schema *string `json:"$schema,omitempty"`
- // ID - The VM run command id.
- ID *string `json:"id,omitempty"`
- // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // Label - The VM run command label.
- Label *string `json:"label,omitempty"`
- // Description - The VM run command description.
- Description *string `json:"description,omitempty"`
-}
-
-// RunCommandInput capture Virtual Machine parameters.
-type RunCommandInput struct {
- // CommandID - The run command id.
- CommandID *string `json:"commandId,omitempty"`
- // Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command.
- Script *[]string `json:"script,omitempty"`
- // Parameters - The run command parameters.
- Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"`
-}
-
-// RunCommandInputParameter describes the properties of a run command parameter.
-type RunCommandInputParameter struct {
- // Name - The run command parameter name.
- Name *string `json:"name,omitempty"`
- // Value - The run command parameter value.
- Value *string `json:"value,omitempty"`
-}
-
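RunCommandInput is the request body for the virtual-machine run-command APIs: a command id, an optional script override, and parameters. A hedged example; the "RunShellScript" command id and the parameter values are illustrative assumptions, not taken from this diff:

```go
// Editorial sketch: invoke a run command with an overriding script and one named parameter.
func exampleRunCommandInput() compute.RunCommandInput {
	return compute.RunCommandInput{
		CommandID: to.StringPtr("RunShellScript"), // well-known Linux run-command id (assumed)
		Script:    &[]string{"echo cluster-autoscaler node check"},
		Parameters: &[]compute.RunCommandInputParameter{
			{Name: to.StringPtr("nodeName"), Value: to.StringPtr("example-node")},
		},
	}
}
```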
-// RunCommandListResult the List Virtual Machine operation response.
-type RunCommandListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machine run commands.
- Value *[]RunCommandDocumentBase `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values.
-type RunCommandListResultIterator struct {
- i int
- page RunCommandListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *RunCommandListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *RunCommandListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter RunCommandListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter RunCommandListResultIterator) Response() RunCommandListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase {
- if !iter.page.NotDone() {
- return RunCommandDocumentBase{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the RunCommandListResultIterator type.
-func NewRunCommandListResultIterator(page RunCommandListResultPage) RunCommandListResultIterator {
- return RunCommandListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (rclr RunCommandListResult) IsEmpty() bool {
- return rclr.Value == nil || len(*rclr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (rclr RunCommandListResult) hasNextLink() bool {
- return rclr.NextLink != nil && len(*rclr.NextLink) != 0
-}
-
-// runCommandListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (rclr RunCommandListResult) runCommandListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !rclr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(rclr.NextLink)))
-}
-
-// RunCommandListResultPage contains a page of RunCommandDocumentBase values.
-type RunCommandListResultPage struct {
- fn func(context.Context, RunCommandListResult) (RunCommandListResult, error)
- rclr RunCommandListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *RunCommandListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RunCommandListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.rclr)
- if err != nil {
- return err
- }
- page.rclr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *RunCommandListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page RunCommandListResultPage) NotDone() bool {
- return !page.rclr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page RunCommandListResultPage) Response() RunCommandListResult {
- return page.rclr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page RunCommandListResultPage) Values() []RunCommandDocumentBase {
- if page.rclr.IsEmpty() {
- return nil
- }
- return *page.rclr.Value
-}
-
-// Creates a new instance of the RunCommandListResultPage type.
-func NewRunCommandListResultPage(cur RunCommandListResult, getNextPage func(context.Context, RunCommandListResult) (RunCommandListResult, error)) RunCommandListResultPage {
- return RunCommandListResultPage{
- fn: getNextPage,
- rclr: cur,
- }
-}
-
-// RunCommandParameterDefinition describes the properties of a run command parameter.
-type RunCommandParameterDefinition struct {
- // Name - The run command parameter name.
- Name *string `json:"name,omitempty"`
- // Type - The run command parameter type.
- Type *string `json:"type,omitempty"`
- // DefaultValue - The run command parameter default value.
- DefaultValue *string `json:"defaultValue,omitempty"`
- // Required - The run command parameter required.
- Required *bool `json:"required,omitempty"`
-}
-
-// RunCommandResult ...
-type RunCommandResult struct {
- autorest.Response `json:"-"`
- // Value - Run command operation response.
- Value *[]InstanceViewStatus `json:"value,omitempty"`
-}
-
-// ScaleInPolicy describes a scale-in policy for a virtual machine scale set.
-type ScaleInPolicy struct {
- // Rules - The rules to be followed when scaling-in a virtual machine scale set. Possible values are: **Default** When a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal scale set. Then, it will be balanced across Fault Domains as far as possible. Within each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in. **OldestVM** When a virtual machine scale set is being scaled-in, the oldest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will be chosen for removal. **NewestVM** When a virtual machine scale set is being scaled-in, the newest virtual machines that are not protected from scale-in will be chosen for removal. For zonal virtual machine scale sets, the scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will be chosen for removal.
- Rules *[]VirtualMachineScaleSetScaleInRules `json:"rules,omitempty"`
- // ForceDeletion - This property allows you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.(Feature in Preview)
- ForceDeletion *bool `json:"forceDeletion,omitempty"`
-}
-
-// ScheduledEventsProfile ...
-type ScheduledEventsProfile struct {
- // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations.
- TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"`
-}
-
-// SecurityProfile specifies the Security profile settings for the virtual machine or virtual machine scale
-// set.
-type SecurityProfile struct {
- // UefiSettings - Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Minimum api-version: 2020-12-01
- UefiSettings *UefiSettings `json:"uefiSettings,omitempty"`
- // EncryptionAtHost - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. Default: The Encryption at host will be disabled unless this property is set to true for the resource.
- EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"`
- // SecurityType - Specifies the SecurityType of the virtual machine. It is set as TrustedLaunch to enable UefiSettings. Default: UefiSettings will not be enabled unless this property is set as TrustedLaunch. Possible values include: 'SecurityTypesTrustedLaunch'
- SecurityType SecurityTypes `json:"securityType,omitempty"`
-}
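// Editor's note, illustrative only (not part of this diff): per the field comments
// above, Trusted Launch is enabled by setting SecurityType alongside UefiSettings,
// and host encryption is opt-in via EncryptionAtHost. A minimal sketch:
func trustedLaunchProfile() *SecurityProfile {
	encryptAtHost := true
	return &SecurityProfile{
		SecurityType:     SecurityTypesTrustedLaunch,
		UefiSettings:     &UefiSettings{}, // secure boot / vTPM flags would be set here
		EncryptionAtHost: &encryptAtHost,
	}
}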
-
-// SharedGallery specifies information about the Shared Gallery that you want to create or update.
-type SharedGallery struct {
- autorest.Response `json:"-"`
- *SharedGalleryIdentifier `json:"identifier,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SharedGallery.
-func (sg SharedGallery) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if sg.SharedGalleryIdentifier != nil {
- objectMap["identifier"] = sg.SharedGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SharedGallery struct.
-func (sg *SharedGallery) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "identifier":
- if v != nil {
- var sharedGalleryIdentifier SharedGalleryIdentifier
- err = json.Unmarshal(*v, &sharedGalleryIdentifier)
- if err != nil {
- return err
- }
- sg.SharedGalleryIdentifier = &sharedGalleryIdentifier
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- sg.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- sg.Location = &location
- }
- }
- }
-
- return nil
-}
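// Editor's note, illustrative only (not part of this diff): the custom marshaler above
// emits only the "identifier" block, while the READ-ONLY Name and Location fields are
// populated solely by UnmarshalJSON from a service response. A minimal round-trip sketch:
func roundTripSharedGallery() error {
	id := "galleryUniqueName"
	out, err := json.Marshal(SharedGallery{
		SharedGalleryIdentifier: &SharedGalleryIdentifier{UniqueID: &id},
	})
	if err != nil {
		return err
	}
	_ = out // {"identifier":{"uniqueId":"galleryUniqueName"}}; no name/location keys are written
	var sg SharedGallery
	// Name and Location are filled in when decoding, even though MarshalJSON drops them.
	return json.Unmarshal([]byte(`{"name":"gallery1","location":"westus","identifier":{"uniqueId":"galleryUniqueName"}}`), &sg)
}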
-
-// SharedGalleryIdentifier the identifier information of shared gallery.
-type SharedGalleryIdentifier struct {
- // UniqueID - The unique id of this shared gallery.
- UniqueID *string `json:"uniqueId,omitempty"`
-}
-
-// SharedGalleryImage specifies information about the gallery image definition that you want to create or
-// update.
-type SharedGalleryImage struct {
- autorest.Response `json:"-"`
- *SharedGalleryImageProperties `json:"properties,omitempty"`
- *SharedGalleryIdentifier `json:"identifier,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SharedGalleryImage.
-func (sgi SharedGalleryImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if sgi.SharedGalleryImageProperties != nil {
- objectMap["properties"] = sgi.SharedGalleryImageProperties
- }
- if sgi.SharedGalleryIdentifier != nil {
- objectMap["identifier"] = sgi.SharedGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SharedGalleryImage struct.
-func (sgi *SharedGalleryImage) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var sharedGalleryImageProperties SharedGalleryImageProperties
- err = json.Unmarshal(*v, &sharedGalleryImageProperties)
- if err != nil {
- return err
- }
- sgi.SharedGalleryImageProperties = &sharedGalleryImageProperties
- }
- case "identifier":
- if v != nil {
- var sharedGalleryIdentifier SharedGalleryIdentifier
- err = json.Unmarshal(*v, &sharedGalleryIdentifier)
- if err != nil {
- return err
- }
- sgi.SharedGalleryIdentifier = &sharedGalleryIdentifier
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- sgi.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- sgi.Location = &location
- }
- }
- }
-
- return nil
-}
-
-// SharedGalleryImageList the List Shared Gallery Images operation response.
-type SharedGalleryImageList struct {
- autorest.Response `json:"-"`
- // Value - A list of shared gallery images.
- Value *[]SharedGalleryImage `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of shared gallery images. Call ListNext() with this to fetch the next page of shared gallery images.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// SharedGalleryImageListIterator provides access to a complete listing of SharedGalleryImage values.
-type SharedGalleryImageListIterator struct {
- i int
- page SharedGalleryImageListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *SharedGalleryImageListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *SharedGalleryImageListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter SharedGalleryImageListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter SharedGalleryImageListIterator) Response() SharedGalleryImageList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter SharedGalleryImageListIterator) Value() SharedGalleryImage {
- if !iter.page.NotDone() {
- return SharedGalleryImage{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the SharedGalleryImageListIterator type.
-func NewSharedGalleryImageListIterator(page SharedGalleryImageListPage) SharedGalleryImageListIterator {
- return SharedGalleryImageListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (sgil SharedGalleryImageList) IsEmpty() bool {
- return sgil.Value == nil || len(*sgil.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (sgil SharedGalleryImageList) hasNextLink() bool {
- return sgil.NextLink != nil && len(*sgil.NextLink) != 0
-}
-
-// sharedGalleryImageListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (sgil SharedGalleryImageList) sharedGalleryImageListPreparer(ctx context.Context) (*http.Request, error) {
- if !sgil.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(sgil.NextLink)))
-}
-
-// SharedGalleryImageListPage contains a page of SharedGalleryImage values.
-type SharedGalleryImageListPage struct {
- fn func(context.Context, SharedGalleryImageList) (SharedGalleryImageList, error)
- sgil SharedGalleryImageList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *SharedGalleryImageListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.sgil)
- if err != nil {
- return err
- }
- page.sgil = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *SharedGalleryImageListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page SharedGalleryImageListPage) NotDone() bool {
- return !page.sgil.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page SharedGalleryImageListPage) Response() SharedGalleryImageList {
- return page.sgil
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page SharedGalleryImageListPage) Values() []SharedGalleryImage {
- if page.sgil.IsEmpty() {
- return nil
- }
- return *page.sgil.Value
-}
-
-// Creates a new instance of the SharedGalleryImageListPage type.
-func NewSharedGalleryImageListPage(cur SharedGalleryImageList, getNextPage func(context.Context, SharedGalleryImageList) (SharedGalleryImageList, error)) SharedGalleryImageListPage {
- return SharedGalleryImageListPage{
- fn: getNextPage,
- sgil: cur,
- }
-}
-
-// SharedGalleryImageProperties describes the properties of a gallery image definition.
-type SharedGalleryImageProperties struct {
- // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. Possible values are: **Windows** **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'OperatingSystemStateTypesGeneralized', 'OperatingSystemStateTypesSpecialized'
- OsState OperatingSystemStateTypes `json:"osState,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
- Identifier *GalleryImageIdentifier `json:"identifier,omitempty"`
- Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
- Disallowed *Disallowed `json:"disallowed,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // Features - A list of gallery image features.
- Features *[]GalleryImageFeature `json:"features,omitempty"`
- PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"`
-}
-
-// SharedGalleryImageVersion specifies information about the gallery image version that you want to create
-// or update.
-type SharedGalleryImageVersion struct {
- autorest.Response `json:"-"`
- *SharedGalleryImageVersionProperties `json:"properties,omitempty"`
- *SharedGalleryIdentifier `json:"identifier,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SharedGalleryImageVersion.
-func (sgiv SharedGalleryImageVersion) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if sgiv.SharedGalleryImageVersionProperties != nil {
- objectMap["properties"] = sgiv.SharedGalleryImageVersionProperties
- }
- if sgiv.SharedGalleryIdentifier != nil {
- objectMap["identifier"] = sgiv.SharedGalleryIdentifier
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SharedGalleryImageVersion struct.
-func (sgiv *SharedGalleryImageVersion) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var sharedGalleryImageVersionProperties SharedGalleryImageVersionProperties
- err = json.Unmarshal(*v, &sharedGalleryImageVersionProperties)
- if err != nil {
- return err
- }
- sgiv.SharedGalleryImageVersionProperties = &sharedGalleryImageVersionProperties
- }
- case "identifier":
- if v != nil {
- var sharedGalleryIdentifier SharedGalleryIdentifier
- err = json.Unmarshal(*v, &sharedGalleryIdentifier)
- if err != nil {
- return err
- }
- sgiv.SharedGalleryIdentifier = &sharedGalleryIdentifier
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- sgiv.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- sgiv.Location = &location
- }
- }
- }
-
- return nil
-}
-
-// SharedGalleryImageVersionList the List Shared Gallery Image versions operation response.
-type SharedGalleryImageVersionList struct {
- autorest.Response `json:"-"`
- // Value - A list of shared gallery images versions.
- Value *[]SharedGalleryImageVersion `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of shared gallery image versions. Call ListNext() with this to fetch the next page of shared gallery image versions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// SharedGalleryImageVersionListIterator provides access to a complete listing of SharedGalleryImageVersion
-// values.
-type SharedGalleryImageVersionListIterator struct {
- i int
- page SharedGalleryImageVersionListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *SharedGalleryImageVersionListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *SharedGalleryImageVersionListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter SharedGalleryImageVersionListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter SharedGalleryImageVersionListIterator) Response() SharedGalleryImageVersionList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter SharedGalleryImageVersionListIterator) Value() SharedGalleryImageVersion {
- if !iter.page.NotDone() {
- return SharedGalleryImageVersion{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the SharedGalleryImageVersionListIterator type.
-func NewSharedGalleryImageVersionListIterator(page SharedGalleryImageVersionListPage) SharedGalleryImageVersionListIterator {
- return SharedGalleryImageVersionListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (sgivl SharedGalleryImageVersionList) IsEmpty() bool {
- return sgivl.Value == nil || len(*sgivl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (sgivl SharedGalleryImageVersionList) hasNextLink() bool {
- return sgivl.NextLink != nil && len(*sgivl.NextLink) != 0
-}
-
-// sharedGalleryImageVersionListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (sgivl SharedGalleryImageVersionList) sharedGalleryImageVersionListPreparer(ctx context.Context) (*http.Request, error) {
- if !sgivl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(sgivl.NextLink)))
-}
-
-// SharedGalleryImageVersionListPage contains a page of SharedGalleryImageVersion values.
-type SharedGalleryImageVersionListPage struct {
- fn func(context.Context, SharedGalleryImageVersionList) (SharedGalleryImageVersionList, error)
- sgivl SharedGalleryImageVersionList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *SharedGalleryImageVersionListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.sgivl)
- if err != nil {
- return err
- }
- page.sgivl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *SharedGalleryImageVersionListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page SharedGalleryImageVersionListPage) NotDone() bool {
- return !page.sgivl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page SharedGalleryImageVersionListPage) Response() SharedGalleryImageVersionList {
- return page.sgivl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page SharedGalleryImageVersionListPage) Values() []SharedGalleryImageVersion {
- if page.sgivl.IsEmpty() {
- return nil
- }
- return *page.sgivl.Value
-}
-
-// Creates a new instance of the SharedGalleryImageVersionListPage type.
-func NewSharedGalleryImageVersionListPage(cur SharedGalleryImageVersionList, getNextPage func(context.Context, SharedGalleryImageVersionList) (SharedGalleryImageVersionList, error)) SharedGalleryImageVersionListPage {
- return SharedGalleryImageVersionListPage{
- fn: getNextPage,
- sgivl: cur,
- }
-}
-
-// SharedGalleryImageVersionProperties describes the properties of a gallery image version.
-type SharedGalleryImageVersionProperties struct {
- // PublishedDate - The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // EndOfLifeDate - The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This property is updatable.
- EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
-}
-
-// SharedGalleryList the List Shared Galleries operation response.
-type SharedGalleryList struct {
- autorest.Response `json:"-"`
- // Value - A list of shared galleries.
- Value *[]SharedGallery `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of shared galleries. Call ListNext() with this to fetch the next page of shared galleries.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// SharedGalleryListIterator provides access to a complete listing of SharedGallery values.
-type SharedGalleryListIterator struct {
- i int
- page SharedGalleryListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *SharedGalleryListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *SharedGalleryListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter SharedGalleryListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter SharedGalleryListIterator) Response() SharedGalleryList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter SharedGalleryListIterator) Value() SharedGallery {
- if !iter.page.NotDone() {
- return SharedGallery{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the SharedGalleryListIterator type.
-func NewSharedGalleryListIterator(page SharedGalleryListPage) SharedGalleryListIterator {
- return SharedGalleryListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (sgl SharedGalleryList) IsEmpty() bool {
- return sgl.Value == nil || len(*sgl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (sgl SharedGalleryList) hasNextLink() bool {
- return sgl.NextLink != nil && len(*sgl.NextLink) != 0
-}
-
-// sharedGalleryListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (sgl SharedGalleryList) sharedGalleryListPreparer(ctx context.Context) (*http.Request, error) {
- if !sgl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(sgl.NextLink)))
-}
-
-// SharedGalleryListPage contains a page of SharedGallery values.
-type SharedGalleryListPage struct {
- fn func(context.Context, SharedGalleryList) (SharedGalleryList, error)
- sgl SharedGalleryList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *SharedGalleryListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.sgl)
- if err != nil {
- return err
- }
- page.sgl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *SharedGalleryListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page SharedGalleryListPage) NotDone() bool {
- return !page.sgl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page SharedGalleryListPage) Response() SharedGalleryList {
- return page.sgl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page SharedGalleryListPage) Values() []SharedGallery {
- if page.sgl.IsEmpty() {
- return nil
- }
- return *page.sgl.Value
-}
-
-// Creates a new instance of the SharedGalleryListPage type.
-func NewSharedGalleryListPage(cur SharedGalleryList, getNextPage func(context.Context, SharedGalleryList) (SharedGalleryList, error)) SharedGalleryListPage {
- return SharedGalleryListPage{
- fn: getNextPage,
- sgl: cur,
- }
-}
-
-// ShareInfoElement ...
-type ShareInfoElement struct {
- // VMURI - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached.
- VMURI *string `json:"vmUri,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for ShareInfoElement.
-func (sie ShareInfoElement) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// SharingProfile profile for gallery sharing to subscription or tenant
-type SharingProfile struct {
- // Permissions - This property allows you to specify the permission of sharing gallery. Possible values are: **Private** **Groups**. Possible values include: 'GallerySharingPermissionTypesPrivate', 'GallerySharingPermissionTypesGroups'
- Permissions GallerySharingPermissionTypes `json:"permissions,omitempty"`
- // Groups - READ-ONLY; A list of sharing profile groups.
- Groups *[]SharingProfileGroup `json:"groups,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SharingProfile.
-func (sp SharingProfile) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if sp.Permissions != "" {
- objectMap["permissions"] = sp.Permissions
- }
- return json.Marshal(objectMap)
-}
-
-// SharingProfileGroup group of the gallery sharing profile
-type SharingProfileGroup struct {
- // Type - This property allows you to specify the type of sharing group. Possible values are: **Subscriptions** **AADTenants**. Possible values include: 'SharingProfileGroupTypesSubscriptions', 'SharingProfileGroupTypesAADTenants'
- Type SharingProfileGroupTypes `json:"type,omitempty"`
- // Ids - A list of subscription/tenant ids the gallery is aimed to be shared to.
- Ids *[]string `json:"ids,omitempty"`
-}
-
-// SharingUpdate specifies information about the gallery sharing profile update.
-type SharingUpdate struct {
- autorest.Response `json:"-"`
- // OperationType - This property allows you to specify the operation type of gallery sharing update. Possible values are: **Add** **Remove** **Reset**. Possible values include: 'SharingUpdateOperationTypesAdd', 'SharingUpdateOperationTypesRemove', 'SharingUpdateOperationTypesReset'
- OperationType SharingUpdateOperationTypes `json:"operationType,omitempty"`
- // Groups - A list of sharing profile groups.
- Groups *[]SharingProfileGroup `json:"groups,omitempty"`
-}
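// Editor's note, illustrative only (not part of this diff): a minimal SharingUpdate that
// shares a gallery with a set of subscriptions, using the enum values named in the
// comments above.
func shareWithSubscriptions(subscriptionIDs []string) SharingUpdate {
	return SharingUpdate{
		OperationType: SharingUpdateOperationTypesAdd,
		Groups: &[]SharingProfileGroup{{
			Type: SharingProfileGroupTypesSubscriptions,
			Ids:  &subscriptionIDs,
		}},
	}
}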
-
-// Sku describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the hardware
-// the scale set is currently on, you need to deallocate the VMs in the scale set before you modify the SKU
-// name.
-type Sku struct {
- // Name - The sku name.
- Name *string `json:"name,omitempty"`
- // Tier - Specifies the tier of virtual machines in a scale set. Possible Values: **Standard** **Basic**
- Tier *string `json:"tier,omitempty"`
- // Capacity - Specifies the number of virtual machines in the scale set.
- Capacity *int64 `json:"capacity,omitempty"`
-}
-
-// Snapshot snapshot resource.
-type Snapshot struct {
- autorest.Response `json:"-"`
- // ManagedBy - READ-ONLY; Unused. Always Null.
- ManagedBy *string `json:"managedBy,omitempty"`
- Sku *SnapshotSku `json:"sku,omitempty"`
- // ExtendedLocation - The extended location where the snapshot will be created. Extended location cannot be changed.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- *SnapshotProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for Snapshot.
-func (s Snapshot) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if s.Sku != nil {
- objectMap["sku"] = s.Sku
- }
- if s.ExtendedLocation != nil {
- objectMap["extendedLocation"] = s.ExtendedLocation
- }
- if s.SnapshotProperties != nil {
- objectMap["properties"] = s.SnapshotProperties
- }
- if s.Location != nil {
- objectMap["location"] = s.Location
- }
- if s.Tags != nil {
- objectMap["tags"] = s.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for Snapshot struct.
-func (s *Snapshot) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "managedBy":
- if v != nil {
- var managedBy string
- err = json.Unmarshal(*v, &managedBy)
- if err != nil {
- return err
- }
- s.ManagedBy = &managedBy
- }
- case "sku":
- if v != nil {
- var sku SnapshotSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- s.Sku = &sku
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- s.ExtendedLocation = &extendedLocation
- }
- case "properties":
- if v != nil {
- var snapshotProperties SnapshotProperties
- err = json.Unmarshal(*v, &snapshotProperties)
- if err != nil {
- return err
- }
- s.SnapshotProperties = &snapshotProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- s.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- s.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- s.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- s.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- s.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// SnapshotList the List Snapshots operation response.
-type SnapshotList struct {
- autorest.Response `json:"-"`
- // Value - A list of snapshots.
- Value *[]Snapshot `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// SnapshotListIterator provides access to a complete listing of Snapshot values.
-type SnapshotListIterator struct {
- i int
- page SnapshotListPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *SnapshotListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *SnapshotListIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter SnapshotListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter SnapshotListIterator) Response() SnapshotList {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter SnapshotListIterator) Value() Snapshot {
- if !iter.page.NotDone() {
- return Snapshot{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the SnapshotListIterator type.
-func NewSnapshotListIterator(page SnapshotListPage) SnapshotListIterator {
- return SnapshotListIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (sl SnapshotList) IsEmpty() bool {
- return sl.Value == nil || len(*sl.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (sl SnapshotList) hasNextLink() bool {
- return sl.NextLink != nil && len(*sl.NextLink) != 0
-}
-
-// snapshotListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (sl SnapshotList) snapshotListPreparer(ctx context.Context) (*http.Request, error) {
- if !sl.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(sl.NextLink)))
-}
-
-// SnapshotListPage contains a page of Snapshot values.
-type SnapshotListPage struct {
- fn func(context.Context, SnapshotList) (SnapshotList, error)
- sl SnapshotList
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *SnapshotListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.sl)
- if err != nil {
- return err
- }
- page.sl = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *SnapshotListPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page SnapshotListPage) NotDone() bool {
- return !page.sl.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page SnapshotListPage) Response() SnapshotList {
- return page.sl
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page SnapshotListPage) Values() []Snapshot {
- if page.sl.IsEmpty() {
- return nil
- }
- return *page.sl.Value
-}
-
-// Creates a new instance of the SnapshotListPage type.
-func NewSnapshotListPage(cur SnapshotList, getNextPage func(context.Context, SnapshotList) (SnapshotList, error)) SnapshotListPage {
- return SnapshotListPage{
- fn: getNextPage,
- sl: cur,
- }
-}
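// Editor's note, illustrative only (not part of this diff): the iterator wraps the page
// type above and yields one Snapshot at a time; NextWithContext advances within the
// current page first and only requests the next page once the current one is exhausted.
// The fetchNext parameter is a hypothetical stand-in for the client-supplied getNextPage func.
func snapshotNames(ctx context.Context, first SnapshotList,
	fetchNext func(context.Context, SnapshotList) (SnapshotList, error)) ([]string, error) {
	iter := NewSnapshotListIterator(NewSnapshotListPage(first, fetchNext))
	var names []string
	for iter.NotDone() {
		if s := iter.Value(); s.Name != nil {
			names = append(names, *s.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}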
-
-// SnapshotProperties snapshot resource properties.
-type SnapshotProperties struct {
- // TimeCreated - READ-ONLY; The time when the snapshot was created.
- TimeCreated *date.Time `json:"timeCreated,omitempty"`
- // OsType - The Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'HyperVGenerationV1', 'HyperVGenerationV2'
- HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
- // PurchasePlan - Purchase plan information for the image from which the source disk for the snapshot was originally created.
- PurchasePlan *PurchasePlan `json:"purchasePlan,omitempty"`
- // SupportedCapabilities - List of supported capabilities (like Accelerated Networking) for the image from which the source disk from the snapshot was originally created.
- SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
- // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
- CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
- DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
- // DiskState - The state of the snapshot. Possible values include: 'DiskStateUnattached', 'DiskStateAttached', 'DiskStateReserved', 'DiskStateFrozen', 'DiskStateActiveSAS', 'DiskStateActiveSASFrozen', 'DiskStateReadyToUpload', 'DiskStateActiveUpload'
- DiskState DiskState `json:"diskState,omitempty"`
- // UniqueID - READ-ONLY; Unique Guid identifying the resource.
- UniqueID *string `json:"uniqueId,omitempty"`
- // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
- EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
- // ProvisioningState - READ-ONLY; The disk provisioning state.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // Incremental - Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed.
- Incremental *bool `json:"incremental,omitempty"`
- // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
- Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
- NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
- DiskAccessID *string `json:"diskAccessId,omitempty"`
- // SupportsHibernation - Indicates the OS on a snapshot supports hibernation.
- SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
- PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
- // CompletionPercent - Percentage complete for the background copy when a resource is created via the CopyStart operation.
- CompletionPercent *float64 `json:"completionPercent,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SnapshotProperties.
-func (sp SnapshotProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if sp.OsType != "" {
- objectMap["osType"] = sp.OsType
- }
- if sp.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = sp.HyperVGeneration
- }
- if sp.PurchasePlan != nil {
- objectMap["purchasePlan"] = sp.PurchasePlan
- }
- if sp.SupportedCapabilities != nil {
- objectMap["supportedCapabilities"] = sp.SupportedCapabilities
- }
- if sp.CreationData != nil {
- objectMap["creationData"] = sp.CreationData
- }
- if sp.DiskSizeGB != nil {
- objectMap["diskSizeGB"] = sp.DiskSizeGB
- }
- if sp.DiskState != "" {
- objectMap["diskState"] = sp.DiskState
- }
- if sp.EncryptionSettingsCollection != nil {
- objectMap["encryptionSettingsCollection"] = sp.EncryptionSettingsCollection
- }
- if sp.Incremental != nil {
- objectMap["incremental"] = sp.Incremental
- }
- if sp.Encryption != nil {
- objectMap["encryption"] = sp.Encryption
- }
- if sp.NetworkAccessPolicy != "" {
- objectMap["networkAccessPolicy"] = sp.NetworkAccessPolicy
- }
- if sp.DiskAccessID != nil {
- objectMap["diskAccessId"] = sp.DiskAccessID
- }
- if sp.SupportsHibernation != nil {
- objectMap["supportsHibernation"] = sp.SupportsHibernation
- }
- if sp.PublicNetworkAccess != "" {
- objectMap["publicNetworkAccess"] = sp.PublicNetworkAccess
- }
- if sp.CompletionPercent != nil {
- objectMap["completionPercent"] = sp.CompletionPercent
- }
- return json.Marshal(objectMap)
-}
-
-// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type SnapshotsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(SnapshotsClient) (Snapshot, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *SnapshotsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for SnapshotsCreateOrUpdateFuture.Result.
-func (future *SnapshotsCreateOrUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- s.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent {
- s, err = client.CreateOrUpdateResponder(s.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request")
- }
- }
- return
-}
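// Editor's note, illustrative only (not part of this diff): a minimal sketch of how
// callers typically resolve one of these long-running-operation futures. It assumes a
// configured SnapshotsClient; WaitForCompletionRef is provided by the embedded
// azure.FutureAPI, and Result is the func wired up in UnmarshalJSON above.
func waitForSnapshot(ctx context.Context, client SnapshotsClient, future SnapshotsCreateOrUpdateFuture) (Snapshot, error) {
	// Poll until the service reports the operation as finished.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Snapshot{}, err
	}
	// Fetch and decode the final Snapshot payload.
	return future.Result(client)
}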
-
-// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type SnapshotsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(SnapshotsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *SnapshotsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for SnapshotsDeleteFuture.Result.
-func (future *SnapshotsDeleteFuture) result(client SnapshotsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type SnapshotsGrantAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(SnapshotsClient) (AccessURI, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *SnapshotsGrantAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for SnapshotsGrantAccessFuture.Result.
-func (future *SnapshotsGrantAccessFuture) result(client SnapshotsClient) (au AccessURI, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- au.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent {
- au, err = client.GrantAccessResponder(au.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an
-// optional parameter for incremental snapshot and the default behavior is the SKU will be set to the same
-// sku as the previous snapshot
-type SnapshotSku struct {
- // Name - The sku name. Possible values include: 'SnapshotStorageAccountTypesStandardLRS', 'SnapshotStorageAccountTypesPremiumLRS', 'SnapshotStorageAccountTypesStandardZRS'
- Name SnapshotStorageAccountTypes `json:"name,omitempty"`
- // Tier - READ-ONLY; The sku tier.
- Tier *string `json:"tier,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SnapshotSku.
-func (ss SnapshotSku) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ss.Name != "" {
- objectMap["name"] = ss.Name
- }
- return json.Marshal(objectMap)
-}
-
-// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type SnapshotsRevokeAccessFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(SnapshotsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *SnapshotsRevokeAccessFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for SnapshotsRevokeAccessFuture.Result.
-func (future *SnapshotsRevokeAccessFuture) result(client SnapshotsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type SnapshotsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(SnapshotsClient) (Snapshot, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *SnapshotsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for SnapshotsUpdateFuture.Result.
-func (future *SnapshotsUpdateFuture) result(client SnapshotsClient) (s Snapshot, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- s.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent {
- s, err = client.UpdateResponder(s.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// SnapshotUpdate snapshot update resource.
-type SnapshotUpdate struct {
- *SnapshotUpdateProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
- Sku *SnapshotSku `json:"sku,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SnapshotUpdate.
-func (su SnapshotUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if su.SnapshotUpdateProperties != nil {
- objectMap["properties"] = su.SnapshotUpdateProperties
- }
- if su.Tags != nil {
- objectMap["tags"] = su.Tags
- }
- if su.Sku != nil {
- objectMap["sku"] = su.Sku
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SnapshotUpdate struct.
-func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var snapshotUpdateProperties SnapshotUpdateProperties
- err = json.Unmarshal(*v, &snapshotUpdateProperties)
- if err != nil {
- return err
- }
- su.SnapshotUpdateProperties = &snapshotUpdateProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- su.Tags = tags
- }
- case "sku":
- if v != nil {
- var sku SnapshotSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- su.Sku = &sku
- }
- }
- }
-
- return nil
-}
-
-// SnapshotUpdateProperties snapshot resource update properties.
-type SnapshotUpdateProperties struct {
- // OsType - the Operating System type. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
- EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
- // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
- Encryption *Encryption `json:"encryption,omitempty"`
- // NetworkAccessPolicy - Possible values include: 'NetworkAccessPolicyAllowAll', 'NetworkAccessPolicyAllowPrivate', 'NetworkAccessPolicyDenyAll'
- NetworkAccessPolicy NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
- // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks.
- DiskAccessID *string `json:"diskAccessId,omitempty"`
- // SupportsHibernation - Indicates the OS on a snapshot supports hibernation.
- SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
- // PublicNetworkAccess - Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled'
- PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
-}
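// Editor's note, illustrative only (not part of this diff): a SnapshotUpdate that grows
// a snapshot to 256 GiB, per the DiskSizeGB comment above (resizes may only increase the
// size and are only allowed while the disk is not attached to a running VM).
func resizeSnapshotUpdate() SnapshotUpdate {
	newSize := int32(256)
	return SnapshotUpdate{
		SnapshotUpdateProperties: &SnapshotUpdateProperties{DiskSizeGB: &newSize},
	}
}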
-
-// SoftDeletePolicy contains information about the soft deletion policy of the gallery.
-type SoftDeletePolicy struct {
- // IsSoftDeleteEnabled - Enables soft-deletion for resources in this gallery, allowing them to be recovered within retention time.
- IsSoftDeleteEnabled *bool `json:"isSoftDeleteEnabled,omitempty"`
-}
-
-// SourceVault the vault id is an Azure Resource Manager Resource id in the form
-// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
-type SourceVault struct {
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// SpotRestorePolicy specifies the Spot-Try-Restore properties for the virtual machine scale set.
-// With this property customer can enable or disable automatic restore of the evicted Spot VMSS VM
-// instances opportunistically based on capacity availability and pricing constraint.
-type SpotRestorePolicy struct {
- // Enabled - Enables the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints
- Enabled *bool `json:"enabled,omitempty"`
- // RestoreTimeout - Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances
- RestoreTimeout *string `json:"restoreTimeout,omitempty"`
-}
-
-// SSHConfiguration SSH configuration for Linux based VMs running on Azure
-type SSHConfiguration struct {
- // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs.
- PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"`
-}
-
-// SSHPublicKey contains information about SSH certificate public key and the path on the Linux VM where
-// the public key is placed.
-type SSHPublicKey struct {
- // Path - Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys
- Path *string `json:"path,omitempty"`
- KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed).
- KeyData *string `json:"keyData,omitempty"`
-}
-
-// SSHPublicKeyGenerateKeyPairResult response from generation of an SSH key pair.
-type SSHPublicKeyGenerateKeyPairResult struct {
- autorest.Response `json:"-"`
- // PrivateKey - Private key portion of the key pair used to authenticate to a virtual machine through ssh. The private key is returned in RFC3447 format and should be treated as a secret.
- PrivateKey *string `json:"privateKey,omitempty"`
- // PublicKey - Public key portion of the key pair used to authenticate to a virtual machine through ssh. The public key is in ssh-rsa format.
- PublicKey *string `json:"publicKey,omitempty"`
- // ID - The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{SshPublicKeyName}
- ID *string `json:"id,omitempty"`
-}
-
-// SSHPublicKeyResource specifies information about the SSH public key.
-type SSHPublicKeyResource struct {
- autorest.Response `json:"-"`
- // SSHPublicKeyResourceProperties - Properties of the SSH public key.
- *SSHPublicKeyResourceProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for SSHPublicKeyResource.
-func (spkr SSHPublicKeyResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if spkr.SSHPublicKeyResourceProperties != nil {
- objectMap["properties"] = spkr.SSHPublicKeyResourceProperties
- }
- if spkr.Location != nil {
- objectMap["location"] = spkr.Location
- }
- if spkr.Tags != nil {
- objectMap["tags"] = spkr.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SSHPublicKeyResource struct.
-func (spkr *SSHPublicKeyResource) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var SSHPublicKeyResourceProperties SSHPublicKeyResourceProperties
- err = json.Unmarshal(*v, &SSHPublicKeyResourceProperties)
- if err != nil {
- return err
- }
- spkr.SSHPublicKeyResourceProperties = &SSHPublicKeyResourceProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- spkr.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- spkr.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- spkr.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- spkr.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- spkr.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// SSHPublicKeyResourceProperties properties of the SSH public key.
-type SSHPublicKeyResourceProperties struct {
- // PublicKey - SSH public key used to authenticate to a virtual machine through ssh. If this property is not initially provided when the resource is created, the publicKey property will be populated when generateKeyPair is called. If the public key is provided upon resource creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format.
- PublicKey *string `json:"publicKey,omitempty"`
-}
-
-// SSHPublicKeysGroupListResult the list SSH public keys operation response.
-type SSHPublicKeysGroupListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of SSH public keys
- Value *[]SSHPublicKeyResource `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of SSH public keys. Call ListNext() with this URI to fetch the next page of SSH public keys.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// SSHPublicKeysGroupListResultIterator provides access to a complete listing of SSHPublicKeyResource
-// values.
-type SSHPublicKeysGroupListResultIterator struct {
- i int
- page SSHPublicKeysGroupListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *SSHPublicKeysGroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysGroupListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *SSHPublicKeysGroupListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter SSHPublicKeysGroupListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter SSHPublicKeysGroupListResultIterator) Response() SSHPublicKeysGroupListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter SSHPublicKeysGroupListResultIterator) Value() SSHPublicKeyResource {
- if !iter.page.NotDone() {
- return SSHPublicKeyResource{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the SSHPublicKeysGroupListResultIterator type.
-func NewSSHPublicKeysGroupListResultIterator(page SSHPublicKeysGroupListResultPage) SSHPublicKeysGroupListResultIterator {
- return SSHPublicKeysGroupListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (spkglr SSHPublicKeysGroupListResult) IsEmpty() bool {
- return spkglr.Value == nil || len(*spkglr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (spkglr SSHPublicKeysGroupListResult) hasNextLink() bool {
- return spkglr.NextLink != nil && len(*spkglr.NextLink) != 0
-}
-
-// sSHPublicKeysGroupListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (spkglr SSHPublicKeysGroupListResult) sSHPublicKeysGroupListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !spkglr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(spkglr.NextLink)))
-}
-
-// SSHPublicKeysGroupListResultPage contains a page of SSHPublicKeyResource values.
-type SSHPublicKeysGroupListResultPage struct {
- fn func(context.Context, SSHPublicKeysGroupListResult) (SSHPublicKeysGroupListResult, error)
- spkglr SSHPublicKeysGroupListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *SSHPublicKeysGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysGroupListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.spkglr)
- if err != nil {
- return err
- }
- page.spkglr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *SSHPublicKeysGroupListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page SSHPublicKeysGroupListResultPage) NotDone() bool {
- return !page.spkglr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page SSHPublicKeysGroupListResultPage) Response() SSHPublicKeysGroupListResult {
- return page.spkglr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page SSHPublicKeysGroupListResultPage) Values() []SSHPublicKeyResource {
- if page.spkglr.IsEmpty() {
- return nil
- }
- return *page.spkglr.Value
-}
-
-// Creates a new instance of the SSHPublicKeysGroupListResultPage type.
-func NewSSHPublicKeysGroupListResultPage(cur SSHPublicKeysGroupListResult, getNextPage func(context.Context, SSHPublicKeysGroupListResult) (SSHPublicKeysGroupListResult, error)) SSHPublicKeysGroupListResultPage {
- return SSHPublicKeysGroupListResultPage{
- fn: getNextPage,
- spkglr: cur,
- }
-}
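// --- Illustrative sketch, not part of this change ---
// The ListResult/Page/Iterator trio deleted above implements NextLink-driven
// paging: keep requesting the NextLink URL until it is empty. The sketch below
// shows that control flow with hypothetical names (pageResult, fetchPage); it
// does not use autorest or the SDK types themselves.
package main

import (
	"context"
	"fmt"
)

type pageResult struct {
	Values   []string // one page of results
	NextLink string   // empty when there are no further pages
}

// fetchPage stands in for the HTTP round trip done by the generated preparer/sender.
func fetchPage(_ context.Context, link string) (pageResult, error) {
	fake := map[string]pageResult{
		"":       {Values: []string{"sshkey-a"}, NextLink: "page-2"},
		"page-2": {Values: []string{"sshkey-b"}, NextLink: ""},
	}
	return fake[link], nil
}

func main() {
	ctx := context.Background()
	link := ""
	for {
		page, err := fetchPage(ctx, link)
		if err != nil {
			panic(err)
		}
		for _, v := range page.Values {
			fmt.Println(v)
		}
		if page.NextLink == "" { // mirrors hasNextLink() returning false
			return
		}
		link = page.NextLink
	}
}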
-
-// SSHPublicKeyUpdateResource specifies information about the SSH public key.
-type SSHPublicKeyUpdateResource struct {
- // SSHPublicKeyResourceProperties - Properties of the SSH public key.
- *SSHPublicKeyResourceProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for SSHPublicKeyUpdateResource.
-func (spkur SSHPublicKeyUpdateResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if spkur.SSHPublicKeyResourceProperties != nil {
- objectMap["properties"] = spkur.SSHPublicKeyResourceProperties
- }
- if spkur.Tags != nil {
- objectMap["tags"] = spkur.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for SSHPublicKeyUpdateResource struct.
-func (spkur *SSHPublicKeyUpdateResource) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var SSHPublicKeyResourceProperties SSHPublicKeyResourceProperties
- err = json.Unmarshal(*v, &SSHPublicKeyResourceProperties)
- if err != nil {
- return err
- }
- spkur.SSHPublicKeyResourceProperties = &SSHPublicKeyResourceProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- spkur.Tags = tags
- }
- }
- }
-
- return nil
-}
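// --- Illustrative sketch, not part of this change ---
// The generated UnmarshalJSON methods above decode the body into a
// map[string]*json.RawMessage and dispatch per key, which is how the flattened
// *Properties pointer gets rebuilt from the nested "properties" object. The
// types below (sketchProperties, sketchResource) are hypothetical, not SDK
// identifiers.
package main

import (
	"encoding/json"
	"fmt"
)

type sketchProperties struct {
	PublicKey *string `json:"publicKey,omitempty"`
}

type sketchResource struct {
	*sketchProperties
	Tags map[string]*string
}

// UnmarshalJSON mirrors the per-key RawMessage switch used by the SDK models.
func (r *sketchResource) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		switch k {
		case "properties":
			var p sketchProperties
			if err := json.Unmarshal(*v, &p); err != nil {
				return err
			}
			r.sketchProperties = &p
		case "tags":
			if err := json.Unmarshal(*v, &r.Tags); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	var r sketchResource
	body := []byte(`{"properties":{"publicKey":"ssh-rsa AAAA..."},"tags":{"env":"test"}}`)
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	fmt.Println(*r.PublicKey, *r.Tags["env"]) // PublicKey promoted via the embedded pointer
}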
-
-// StatusCodeCount ...
-type StatusCodeCount struct {
- // Code - READ-ONLY; The instance view status code
- Code *string `json:"code,omitempty"`
- // Count - READ-ONLY; Number of instances having this status code
- Count *int32 `json:"count,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for StatusCodeCount.
-func (scc StatusCodeCount) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
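// --- Illustrative sketch, not part of this change ---
// Types such as StatusCodeCount above have only READ-ONLY fields, so their
// generated MarshalJSON returns an empty object: building the payload from a
// filtered objectMap keeps server-populated fields out of request bodies.
// The type below is a hypothetical stand-in, not an SDK identifier.
package main

import (
	"encoding/json"
	"fmt"
)

type readOnlyCount struct {
	Code  *string `json:"code,omitempty"`  // READ-ONLY in the service contract
	Count *int32  `json:"count,omitempty"` // READ-ONLY in the service contract
}

// MarshalJSON mirrors the generated pattern: nothing is copied into objectMap,
// so even a populated value serializes as "{}".
func (r readOnlyCount) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	return json.Marshal(objectMap)
}

func main() {
	code, count := "ProvisioningState/succeeded", int32(3)
	b, err := json.Marshal(readOnlyCount{Code: &code, Count: &count})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints {}
}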
-
-// StorageProfile specifies the storage settings for the virtual machine disks.
-type StorageProfile struct {
- // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations.
- ImageReference *ImageReference `json:"imageReference,omitempty"`
- OsDisk - Specifies information about the operating system disk used by the virtual machine. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- OsDisk *OSDisk `json:"osDisk,omitempty"`
- DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- DataDisks *[]DataDisk `json:"dataDisks,omitempty"`
-}
-
-// SubResource ...
-type SubResource struct {
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// SubResourceReadOnly ...
-type SubResourceReadOnly struct {
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SubResourceReadOnly.
-func (srro SubResourceReadOnly) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// SubResourceWithColocationStatus ...
-type SubResourceWithColocationStatus struct {
- // ColocationStatus - Describes colocation status of a resource in the Proximity Placement Group.
- ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// SupportedCapabilities list of supported capabilities (like accelerated networking) persisted on the disk
-// resource for VM use.
-type SupportedCapabilities struct {
- // AcceleratedNetwork - True if the image from which the OS disk is created supports accelerated networking.
- AcceleratedNetwork *bool `json:"acceleratedNetwork,omitempty"`
-}
-
-// TargetRegion describes the target region information.
-type TargetRegion struct {
- // Name - The name of the region.
- Name *string `json:"name,omitempty"`
- // RegionalReplicaCount - The number of replicas of the Image Version to be created per region. This property is updatable.
- RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"`
- // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS', 'StorageAccountTypePremiumLRS'
- StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- Encryption *EncryptionImages `json:"encryption,omitempty"`
-}
-
-// TerminateNotificationProfile ...
-type TerminateNotificationProfile struct {
- // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes (PT5M)
- NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"`
- // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled.
- Enable *bool `json:"enable,omitempty"`
-}
-
-// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api.
-type ThrottledRequestsInput struct {
- // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
- BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
- // FromTime - From time of the query
- FromTime *date.Time `json:"fromTime,omitempty"`
- // ToTime - To time of the query
- ToTime *date.Time `json:"toTime,omitempty"`
- // GroupByThrottlePolicy - Group query result by Throttle Policy applied.
- GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
- // GroupByOperationName - Group query result by Operation Name.
- GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
- // GroupByResourceName - Group query result by Resource Name.
- GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
- // GroupByClientApplicationID - Group query result by Client Application ID.
- GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"`
- // GroupByUserAgent - Group query result by User Agent.
- GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"`
-}
-
-// UefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual
-// machine. Minimum api-version: 2020-12-01
-type UefiSettings struct {
- SecureBootEnabled - Specifies whether secure boot should be enabled on the virtual machine. Minimum api-version: 2020-12-01
- SecureBootEnabled *bool `json:"secureBootEnabled,omitempty"`
- VTpmEnabled - Specifies whether vTPM should be enabled on the virtual machine. Minimum api-version: 2020-12-01
- VTpmEnabled *bool `json:"vTpmEnabled,omitempty"`
-}
-
-// UpdateDomain defines an update domain for the cloud service.
-type UpdateDomain struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource Name
- Name *string `json:"name,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for UpdateDomain.
-func (ud UpdateDomain) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// UpdateDomainListResult ...
-type UpdateDomainListResult struct {
- autorest.Response `json:"-"`
- Value *[]UpdateDomain `json:"value,omitempty"`
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// UpdateDomainListResultIterator provides access to a complete listing of UpdateDomain values.
-type UpdateDomainListResultIterator struct {
- i int
- page UpdateDomainListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *UpdateDomainListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/UpdateDomainListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *UpdateDomainListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter UpdateDomainListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter UpdateDomainListResultIterator) Response() UpdateDomainListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter UpdateDomainListResultIterator) Value() UpdateDomain {
- if !iter.page.NotDone() {
- return UpdateDomain{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the UpdateDomainListResultIterator type.
-func NewUpdateDomainListResultIterator(page UpdateDomainListResultPage) UpdateDomainListResultIterator {
- return UpdateDomainListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (udlr UpdateDomainListResult) IsEmpty() bool {
- return udlr.Value == nil || len(*udlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (udlr UpdateDomainListResult) hasNextLink() bool {
- return udlr.NextLink != nil && len(*udlr.NextLink) != 0
-}
-
-// updateDomainListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (udlr UpdateDomainListResult) updateDomainListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !udlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(udlr.NextLink)))
-}
-
-// UpdateDomainListResultPage contains a page of UpdateDomain values.
-type UpdateDomainListResultPage struct {
- fn func(context.Context, UpdateDomainListResult) (UpdateDomainListResult, error)
- udlr UpdateDomainListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *UpdateDomainListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/UpdateDomainListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.udlr)
- if err != nil {
- return err
- }
- page.udlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *UpdateDomainListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page UpdateDomainListResultPage) NotDone() bool {
- return !page.udlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page UpdateDomainListResultPage) Response() UpdateDomainListResult {
- return page.udlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page UpdateDomainListResultPage) Values() []UpdateDomain {
- if page.udlr.IsEmpty() {
- return nil
- }
- return *page.udlr.Value
-}
-
-// Creates a new instance of the UpdateDomainListResultPage type.
-func NewUpdateDomainListResultPage(cur UpdateDomainListResult, getNextPage func(context.Context, UpdateDomainListResult) (UpdateDomainListResult, error)) UpdateDomainListResultPage {
- return UpdateDomainListResultPage{
- fn: getNextPage,
- udlr: cur,
- }
-}
-
-// UpdateResource the Update Resource model definition.
-type UpdateResource struct {
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for UpdateResource.
-func (ur UpdateResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if ur.Tags != nil {
- objectMap["tags"] = ur.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UpdateResourceDefinition the Update Resource model definition.
-type UpdateResourceDefinition struct {
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for UpdateResourceDefinition.
-func (urd UpdateResourceDefinition) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if urd.Tags != nil {
- objectMap["tags"] = urd.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UpgradeOperationHistoricalStatusInfo virtual Machine Scale Set OS Upgrade History operation response.
-type UpgradeOperationHistoricalStatusInfo struct {
- // Properties - READ-ONLY; Information about the properties of the upgrade operation.
- Properties *UpgradeOperationHistoricalStatusInfoProperties `json:"properties,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - READ-ONLY; Resource location
- Location *string `json:"location,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for UpgradeOperationHistoricalStatusInfo.
-func (uohsi UpgradeOperationHistoricalStatusInfo) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// UpgradeOperationHistoricalStatusInfoProperties describes each OS upgrade on the Virtual Machine Scale
-// Set.
-type UpgradeOperationHistoricalStatusInfoProperties struct {
- // RunningStatus - READ-ONLY; Information about the overall status of the upgrade operation.
- RunningStatus *UpgradeOperationHistoryStatus `json:"runningStatus,omitempty"`
- // Progress - READ-ONLY; Counts of the VMs in each state.
- Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"`
- // Error - READ-ONLY; Error Details for this upgrade if there are any.
- Error *APIError `json:"error,omitempty"`
- // StartedBy - READ-ONLY; Invoker of the Upgrade Operation. Possible values include: 'UpgradeOperationInvokerUnknown', 'UpgradeOperationInvokerUser', 'UpgradeOperationInvokerPlatform'
- StartedBy UpgradeOperationInvoker `json:"startedBy,omitempty"`
- // TargetImageReference - READ-ONLY; Image Reference details
- TargetImageReference *ImageReference `json:"targetImageReference,omitempty"`
- // RollbackInfo - READ-ONLY; Information about OS rollback if performed
- RollbackInfo *RollbackStatusInfo `json:"rollbackInfo,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for UpgradeOperationHistoricalStatusInfoProperties.
-func (uohsip UpgradeOperationHistoricalStatusInfoProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// UpgradeOperationHistoryStatus information about the current running state of the overall upgrade.
-type UpgradeOperationHistoryStatus struct {
- // Code - READ-ONLY; Code indicating the current status of the upgrade. Possible values include: 'UpgradeStateRollingForward', 'UpgradeStateCancelled', 'UpgradeStateCompleted', 'UpgradeStateFaulted'
- Code UpgradeState `json:"code,omitempty"`
- // StartTime - READ-ONLY; Start time of the upgrade.
- StartTime *date.Time `json:"startTime,omitempty"`
- // EndTime - READ-ONLY; End time of the upgrade.
- EndTime *date.Time `json:"endTime,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for UpgradeOperationHistoryStatus.
-func (uohs UpgradeOperationHistoryStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// UpgradePolicy describes an upgrade policy - automatic, manual, or rolling.
-type UpgradePolicy struct {
- Mode - Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are: **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. **Automatic** - All virtual machines in the scale set are automatically updated at the same time. Possible values include: 'UpgradeModeAutomatic', 'UpgradeModeManual', 'UpgradeModeRolling'
- Mode UpgradeMode `json:"mode,omitempty"`
- // RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade.
- RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"`
- // AutomaticOSUpgradePolicy - Configuration parameters used for performing automatic OS Upgrade.
- AutomaticOSUpgradePolicy *AutomaticOSUpgradePolicy `json:"automaticOSUpgradePolicy,omitempty"`
-}
-
-// Usage describes Compute Resource Usage.
-type Usage struct {
- // Unit - An enum describing the unit of usage measurement.
- Unit *string `json:"unit,omitempty"`
- // CurrentValue - The current usage of the resource.
- CurrentValue *int32 `json:"currentValue,omitempty"`
- // Limit - The maximum permitted usage of the resource.
- Limit *int64 `json:"limit,omitempty"`
- // Name - The name of the type of usage.
- Name *UsageName `json:"name,omitempty"`
-}
-
-// UsageName the Usage Names.
-type UsageName struct {
- // Value - The name of the resource.
- Value *string `json:"value,omitempty"`
- // LocalizedValue - The localized name of the resource.
- LocalizedValue *string `json:"localizedValue,omitempty"`
-}
-
-// UserArtifactManage ...
-type UserArtifactManage struct {
- // Install - Required. The path and arguments to install the gallery application. This is limited to 4096 characters.
- Install *string `json:"install,omitempty"`
- // Remove - Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
- Remove *string `json:"remove,omitempty"`
- // Update - Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters.
- Update *string `json:"update,omitempty"`
-}
-
-// UserArtifactSource the source image from which the Image Version is going to be created.
-type UserArtifactSource struct {
- // MediaLink - Required. The mediaLink of the artifact, must be a readable storage page blob.
- MediaLink *string `json:"mediaLink,omitempty"`
- // DefaultConfigurationLink - Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob.
- DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty"`
-}
-
-// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate
-// should reside on the VM.
-type VaultCertificate struct {
- CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object which is encoded in UTF-8: { "data":"<Base64-encoded-certificate>", "dataType":"pfx", "password":"<pfx-file-password>" } To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
- CertificateURL *string `json:"certificateUrl,omitempty"`
- CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account. For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted.
- CertificateStore *string `json:"certificateStore,omitempty"`
-}
-
-// VaultSecretGroup describes a set of certificates which are all in the same Key Vault.
-type VaultSecretGroup struct {
- // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates.
- SourceVault *SubResource `json:"sourceVault,omitempty"`
- // VaultCertificates - The list of key vault references in SourceVault which contain certificates.
- VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"`
-}
-
-// VirtualHardDisk describes the uri of a disk.
-type VirtualHardDisk struct {
- // URI - Specifies the virtual hard disk's uri.
- URI *string `json:"uri,omitempty"`
-}
-
-// VirtualMachine describes a Virtual Machine.
-type VirtualMachine struct {
- autorest.Response `json:"-"`
- // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
- Plan *Plan `json:"plan,omitempty"`
- *VirtualMachineProperties `json:"properties,omitempty"`
- // Resources - READ-ONLY; The virtual machine child extension resources.
- Resources *[]VirtualMachineExtension `json:"resources,omitempty"`
- // Identity - The identity of the virtual machine, if configured.
- Identity *VirtualMachineIdentity `json:"identity,omitempty"`
- // Zones - The virtual machine zones.
- Zones *[]string `json:"zones,omitempty"`
- // ExtendedLocation - The extended location of the Virtual Machine.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachine.
-func (VM VirtualMachine) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if VM.Plan != nil {
- objectMap["plan"] = VM.Plan
- }
- if VM.VirtualMachineProperties != nil {
- objectMap["properties"] = VM.VirtualMachineProperties
- }
- if VM.Identity != nil {
- objectMap["identity"] = VM.Identity
- }
- if VM.Zones != nil {
- objectMap["zones"] = VM.Zones
- }
- if VM.ExtendedLocation != nil {
- objectMap["extendedLocation"] = VM.ExtendedLocation
- }
- if VM.Location != nil {
- objectMap["location"] = VM.Location
- }
- if VM.Tags != nil {
- objectMap["tags"] = VM.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachine struct.
-func (VM *VirtualMachine) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "plan":
- if v != nil {
- var plan Plan
- err = json.Unmarshal(*v, &plan)
- if err != nil {
- return err
- }
- VM.Plan = &plan
- }
- case "properties":
- if v != nil {
- var virtualMachineProperties VirtualMachineProperties
- err = json.Unmarshal(*v, &virtualMachineProperties)
- if err != nil {
- return err
- }
- VM.VirtualMachineProperties = &virtualMachineProperties
- }
- case "resources":
- if v != nil {
- var resources []VirtualMachineExtension
- err = json.Unmarshal(*v, &resources)
- if err != nil {
- return err
- }
- VM.Resources = &resources
- }
- case "identity":
- if v != nil {
- var identity VirtualMachineIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- VM.Identity = &identity
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- VM.Zones = &zones
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- VM.ExtendedLocation = &extendedLocation
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- VM.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- VM.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- VM.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- VM.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- VM.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineAgentInstanceView the instance view of the VM Agent running on the virtual machine.
-type VirtualMachineAgentInstanceView struct {
- // VMAgentVersion - The VM Agent full version.
- VMAgentVersion *string `json:"vmAgentVersion,omitempty"`
- // ExtensionHandlers - The virtual machine extension handler instance view.
- ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// VirtualMachineAssessPatchesResult describes the properties of an AssessPatches result.
-type VirtualMachineAssessPatchesResult struct {
- autorest.Response `json:"-"`
- // Status - READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. At that point it will become "Unknown", "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings'
- Status PatchOperationStatus `json:"status,omitempty"`
- // AssessmentActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs.
- AssessmentActivityID *string `json:"assessmentActivityId,omitempty"`
- // RebootPending - READ-ONLY; The overall reboot status of the VM. It will be true when partially installed patches require a reboot to complete installation but the reboot has not yet occurred.
- RebootPending *bool `json:"rebootPending,omitempty"`
- // CriticalAndSecurityPatchCount - READ-ONLY; The number of critical or security patches that have been detected as available and not yet installed.
- CriticalAndSecurityPatchCount *int32 `json:"criticalAndSecurityPatchCount,omitempty"`
- // OtherPatchCount - READ-ONLY; The number of all available patches excluding critical and security.
- OtherPatchCount *int32 `json:"otherPatchCount,omitempty"`
- // StartDateTime - READ-ONLY; The UTC timestamp when the operation began.
- StartDateTime *date.Time `json:"startDateTime,omitempty"`
- // AvailablePatches - READ-ONLY; The list of patches that have been detected as available for installation.
- AvailablePatches *[]VirtualMachineSoftwarePatchProperties `json:"availablePatches,omitempty"`
- // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them.
- Error *APIError `json:"error,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineAssessPatchesResult.
-func (vmapr VirtualMachineAssessPatchesResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineCaptureParameters capture Virtual Machine parameters.
-type VirtualMachineCaptureParameters struct {
- // VhdPrefix - The captured virtual hard disk's name prefix.
- VhdPrefix *string `json:"vhdPrefix,omitempty"`
- // DestinationContainerName - The destination container name.
- DestinationContainerName *string `json:"destinationContainerName,omitempty"`
- // OverwriteVhds - Specifies whether to overwrite the destination virtual hard disk, in case of conflict.
- OverwriteVhds *bool `json:"overwriteVhds,omitempty"`
-}
-
-// VirtualMachineCaptureResult output of virtual machine capture operation.
-type VirtualMachineCaptureResult struct {
- autorest.Response `json:"-"`
- // Schema - READ-ONLY; the schema of the captured virtual machine
- Schema *string `json:"$schema,omitempty"`
- // ContentVersion - READ-ONLY; the version of the content
- ContentVersion *string `json:"contentVersion,omitempty"`
- // Parameters - READ-ONLY; parameters of the captured virtual machine
- Parameters interface{} `json:"parameters,omitempty"`
- // Resources - READ-ONLY; a list of resource items of the captured virtual machine
- Resources *[]interface{} `json:"resources,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineCaptureResult.
-func (vmcr VirtualMachineCaptureResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmcr.ID != nil {
- objectMap["id"] = vmcr.ID
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineExtension describes a Virtual Machine Extension.
-type VirtualMachineExtension struct {
- autorest.Response `json:"-"`
- *VirtualMachineExtensionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineExtension.
-func (vme VirtualMachineExtension) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vme.VirtualMachineExtensionProperties != nil {
- objectMap["properties"] = vme.VirtualMachineExtensionProperties
- }
- if vme.Location != nil {
- objectMap["location"] = vme.Location
- }
- if vme.Tags != nil {
- objectMap["tags"] = vme.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtension struct.
-func (vme *VirtualMachineExtension) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineExtensionProperties VirtualMachineExtensionProperties
- err = json.Unmarshal(*v, &virtualMachineExtensionProperties)
- if err != nil {
- return err
- }
- vme.VirtualMachineExtensionProperties = &virtualMachineExtensionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vme.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vme.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vme.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vme.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vme.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineExtensionHandlerInstanceView the instance view of a virtual machine extension handler.
-type VirtualMachineExtensionHandlerInstanceView struct {
- // Type - Specifies the type of the extension; an example is "CustomScriptExtension".
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the script handler.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // Status - The extension handler status.
- Status *InstanceViewStatus `json:"status,omitempty"`
-}
-
-// VirtualMachineExtensionImage describes a Virtual Machine Extension Image.
-type VirtualMachineExtensionImage struct {
- autorest.Response `json:"-"`
- *VirtualMachineExtensionImageProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineExtensionImage.
-func (vmei VirtualMachineExtensionImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmei.VirtualMachineExtensionImageProperties != nil {
- objectMap["properties"] = vmei.VirtualMachineExtensionImageProperties
- }
- if vmei.Location != nil {
- objectMap["location"] = vmei.Location
- }
- if vmei.Tags != nil {
- objectMap["tags"] = vmei.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionImage struct.
-func (vmei *VirtualMachineExtensionImage) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineExtensionImageProperties VirtualMachineExtensionImageProperties
- err = json.Unmarshal(*v, &virtualMachineExtensionImageProperties)
- if err != nil {
- return err
- }
- vmei.VirtualMachineExtensionImageProperties = &virtualMachineExtensionImageProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmei.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmei.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmei.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vmei.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmei.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineExtensionImageProperties describes the properties of a Virtual Machine Extension Image.
-type VirtualMachineExtensionImageProperties struct {
- // OperatingSystem - The operating system this extension supports.
- OperatingSystem *string `json:"operatingSystem,omitempty"`
- // ComputeRole - The type of role (IaaS or PaaS) this extension supports.
- ComputeRole *string `json:"computeRole,omitempty"`
- // HandlerSchema - The schema defined by publisher, where extension consumers should provide settings in a matching schema.
- HandlerSchema *string `json:"handlerSchema,omitempty"`
- // VMScaleSetEnabled - Whether the extension can be used on xRP VMScaleSets. By default existing extensions are usable on scalesets, but there might be cases where a publisher wants to explicitly indicate the extension is only enabled for CRP VMs but not VMSS.
- VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"`
- // SupportsMultipleExtensions - Whether the handler can support multiple extensions.
- SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"`
-}
-
-// VirtualMachineExtensionInstanceView the instance view of a virtual machine extension.
-type VirtualMachineExtensionInstanceView struct {
- // Name - The virtual machine extension name.
- Name *string `json:"name,omitempty"`
- // Type - Specifies the type of the extension; an example is "CustomScriptExtension".
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the script handler.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // Substatuses - The resource status information.
- Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// VirtualMachineExtensionProperties describes the properties of a Virtual Machine Extension.
-type VirtualMachineExtensionProperties struct {
- // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed.
- ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
- // Publisher - The name of the extension handler publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Type - Specifies the type of the extension; an example is "CustomScriptExtension".
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the script handler.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
- AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
- // EnableAutomaticUpgrade - Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
- EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
- // Settings - Json formatted public settings for the extension.
- Settings interface{} `json:"settings,omitempty"`
- // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
- ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // InstanceView - The virtual machine extension instance view.
- InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"`
- // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
- SuppressFailures *bool `json:"suppressFailures,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineExtensionProperties.
-func (vmep VirtualMachineExtensionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmep.ForceUpdateTag != nil {
- objectMap["forceUpdateTag"] = vmep.ForceUpdateTag
- }
- if vmep.Publisher != nil {
- objectMap["publisher"] = vmep.Publisher
- }
- if vmep.Type != nil {
- objectMap["type"] = vmep.Type
- }
- if vmep.TypeHandlerVersion != nil {
- objectMap["typeHandlerVersion"] = vmep.TypeHandlerVersion
- }
- if vmep.AutoUpgradeMinorVersion != nil {
- objectMap["autoUpgradeMinorVersion"] = vmep.AutoUpgradeMinorVersion
- }
- if vmep.EnableAutomaticUpgrade != nil {
- objectMap["enableAutomaticUpgrade"] = vmep.EnableAutomaticUpgrade
- }
- if vmep.Settings != nil {
- objectMap["settings"] = vmep.Settings
- }
- if vmep.ProtectedSettings != nil {
- objectMap["protectedSettings"] = vmep.ProtectedSettings
- }
- if vmep.InstanceView != nil {
- objectMap["instanceView"] = vmep.InstanceView
- }
- if vmep.SuppressFailures != nil {
- objectMap["suppressFailures"] = vmep.SuppressFailures
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineExtensionsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineExtensionsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineExtensionsCreateOrUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vme.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent {
- vme, err = client.CreateOrUpdateResponder(vme.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request")
- }
- }
- return
-}
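// --- Illustrative sketch, not part of this change ---
// The *Future types above wrap azure.Future's long-running-operation handling:
// Result polls the operation until it is done, then fetches and decodes the
// final resource. The sketch below shows that shape with hypothetical names
// (lroFuture, done, fetch); it is not the autorest API.
package main

import (
	"fmt"
	"time"
)

type lroFuture struct {
	done    func() (bool, error)   // one poll of the operation status endpoint
	fetch   func() (string, error) // retrieval of the final resource
	backoff time.Duration
}

// result keeps polling until done, mirroring DoneWithContext + GetResult above.
func (f lroFuture) result() (string, error) {
	for {
		ok, err := f.done()
		if err != nil {
			return "", fmt.Errorf("polling failure: %w", err)
		}
		if ok {
			return f.fetch()
		}
		time.Sleep(f.backoff)
	}
}

func main() {
	polls := 0
	fut := lroFuture{
		done:    func() (bool, error) { polls++; return polls >= 3, nil },
		fetch:   func() (string, error) { return "extension provisioned", nil },
		backoff: 10 * time.Millisecond,
	}
	out, err := fut.result()
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}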
-
-// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineExtensionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineExtensionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineExtensionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineExtensionsDeleteFuture.Result.
-func (future *VirtualMachineExtensionsDeleteFuture) result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineExtensionsListResult the List Extension operation response
-type VirtualMachineExtensionsListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of extensions
- Value *[]VirtualMachineExtension `json:"value,omitempty"`
-}
-
-// VirtualMachineExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineExtensionsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineExtensionsClient) (VirtualMachineExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineExtensionsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineExtensionsUpdateFuture.Result.
-func (future *VirtualMachineExtensionsUpdateFuture) result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vme.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent {
- vme, err = client.UpdateResponder(vme.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineExtensionUpdate describes a Virtual Machine Extension.
-type VirtualMachineExtensionUpdate struct {
- *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineExtensionUpdate.
-func (vmeu VirtualMachineExtensionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmeu.VirtualMachineExtensionUpdateProperties != nil {
- objectMap["properties"] = vmeu.VirtualMachineExtensionUpdateProperties
- }
- if vmeu.Tags != nil {
- objectMap["tags"] = vmeu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionUpdate struct.
-func (vmeu *VirtualMachineExtensionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineExtensionUpdateProperties VirtualMachineExtensionUpdateProperties
- err = json.Unmarshal(*v, &virtualMachineExtensionUpdateProperties)
- if err != nil {
- return err
- }
- vmeu.VirtualMachineExtensionUpdateProperties = &virtualMachineExtensionUpdateProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmeu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineExtensionUpdateProperties describes the properties of a Virtual Machine Extension.
-type VirtualMachineExtensionUpdateProperties struct {
- // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed.
- ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
- // Publisher - The name of the extension handler publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Type - Specifies the type of the extension; an example is "CustomScriptExtension".
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the script handler.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
- AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
- // EnableAutomaticUpgrade - Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
- EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
- // Settings - Json formatted public settings for the extension.
- Settings interface{} `json:"settings,omitempty"`
- // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
- ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
- // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
- SuppressFailures *bool `json:"suppressFailures,omitempty"`
-}
-
-// VirtualMachineHealthStatus the health status of the VM.
-type VirtualMachineHealthStatus struct {
- // Status - READ-ONLY; The health status information for the VM.
- Status *InstanceViewStatus `json:"status,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineHealthStatus.
-func (vmhs VirtualMachineHealthStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineIdentity identity for the virtual machine.
-type VirtualMachineIdentity struct {
- // PrincipalID - READ-ONLY; The principal id of virtual machine identity. This property will only be provided for a system assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // TenantID - READ-ONLY; The tenant id associated with the virtual machine. This property will only be provided for a system assigned identity.
- TenantID *string `json:"tenantId,omitempty"`
- // Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone'
- Type ResourceIdentityType `json:"type,omitempty"`
- // UserAssignedIdentities - The list of user identities associated with the Virtual Machine. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
- UserAssignedIdentities map[string]*VirtualMachineIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineIdentity.
-func (vmi VirtualMachineIdentity) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmi.Type != "" {
- objectMap["type"] = vmi.Type
- }
- if vmi.UserAssignedIdentities != nil {
- objectMap["userAssignedIdentities"] = vmi.UserAssignedIdentities
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineIdentityUserAssignedIdentitiesValue ...
-type VirtualMachineIdentityUserAssignedIdentitiesValue struct {
- // PrincipalID - READ-ONLY; The principal id of user assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // ClientID - READ-ONLY; The client id of user assigned identity.
- ClientID *string `json:"clientId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineIdentityUserAssignedIdentitiesValue.
-func (vmiAiv VirtualMachineIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineImage describes a Virtual Machine Image.
-type VirtualMachineImage struct {
- autorest.Response `json:"-"`
- *VirtualMachineImageProperties `json:"properties,omitempty"`
- // Name - The name of the resource.
- Name *string `json:"name,omitempty"`
- // Location - The supported Azure location of the resource.
- Location *string `json:"location,omitempty"`
- // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md).
- Tags map[string]*string `json:"tags"`
- // ExtendedLocation - The extended location of the Virtual Machine.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineImage.
-func (vmi VirtualMachineImage) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmi.VirtualMachineImageProperties != nil {
- objectMap["properties"] = vmi.VirtualMachineImageProperties
- }
- if vmi.Name != nil {
- objectMap["name"] = vmi.Name
- }
- if vmi.Location != nil {
- objectMap["location"] = vmi.Location
- }
- if vmi.Tags != nil {
- objectMap["tags"] = vmi.Tags
- }
- if vmi.ExtendedLocation != nil {
- objectMap["extendedLocation"] = vmi.ExtendedLocation
- }
- if vmi.ID != nil {
- objectMap["id"] = vmi.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineImage struct.
-func (vmi *VirtualMachineImage) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineImageProperties VirtualMachineImageProperties
- err = json.Unmarshal(*v, &virtualMachineImageProperties)
- if err != nil {
- return err
- }
- vmi.VirtualMachineImageProperties = &virtualMachineImageProperties
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmi.Name = &name
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vmi.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmi.Tags = tags
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- vmi.ExtendedLocation = &extendedLocation
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmi.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineImageFeature specifies additional capabilities supported by the image
-type VirtualMachineImageFeature struct {
- // Name - The name of the feature.
- Name *string `json:"name,omitempty"`
- // Value - The corresponding value for the feature.
- Value *string `json:"value,omitempty"`
-}
-
-// VirtualMachineImageProperties describes the properties of a Virtual Machine Image.
-type VirtualMachineImageProperties struct {
- Plan *PurchasePlan `json:"plan,omitempty"`
- OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"`
- DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"`
- AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"`
- // HyperVGeneration - Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2'
- HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"`
- // Disallowed - Specifies disallowed configuration for the VirtualMachine created from the image
- Disallowed *DisallowedConfiguration `json:"disallowed,omitempty"`
- Features *[]VirtualMachineImageFeature `json:"features,omitempty"`
-}
-
-// VirtualMachineImageResource virtual machine image resource information.
-type VirtualMachineImageResource struct {
- // Name - The name of the resource.
- Name *string `json:"name,omitempty"`
- // Location - The supported Azure location of the resource.
- Location *string `json:"location,omitempty"`
- // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md).
- Tags map[string]*string `json:"tags"`
- // ExtendedLocation - The extended location of the Virtual Machine.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineImageResource.
-func (vmir VirtualMachineImageResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmir.Name != nil {
- objectMap["name"] = vmir.Name
- }
- if vmir.Location != nil {
- objectMap["location"] = vmir.Location
- }
- if vmir.Tags != nil {
- objectMap["tags"] = vmir.Tags
- }
- if vmir.ExtendedLocation != nil {
- objectMap["extendedLocation"] = vmir.ExtendedLocation
- }
- if vmir.ID != nil {
- objectMap["id"] = vmir.ID
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineInstallPatchesParameters input for InstallPatches as directly received by the API
-type VirtualMachineInstallPatchesParameters struct {
- // MaximumDuration - Specifies the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)
- MaximumDuration *string `json:"maximumDuration,omitempty"`
- // RebootSetting - Defines when it is acceptable to reboot a VM during a software update operation. Possible values include: 'VMGuestPatchRebootSettingIfRequired', 'VMGuestPatchRebootSettingNever', 'VMGuestPatchRebootSettingAlways'
- RebootSetting VMGuestPatchRebootSetting `json:"rebootSetting,omitempty"`
- // WindowsParameters - Input for InstallPatches on a Windows VM, as directly received by the API
- WindowsParameters *WindowsParameters `json:"windowsParameters,omitempty"`
- // LinuxParameters - Input for InstallPatches on a Linux VM, as directly received by the API
- LinuxParameters *LinuxParameters `json:"linuxParameters,omitempty"`
-}
-
-// VirtualMachineInstallPatchesResult the result summary of an installation operation.
-type VirtualMachineInstallPatchesResult struct {
- autorest.Response `json:"-"`
- // Status - READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. At that point it will become "Failed", "Succeeded", "Unknown" or "CompletedWithWarnings.". Possible values include: 'PatchOperationStatusUnknown', 'PatchOperationStatusInProgress', 'PatchOperationStatusFailed', 'PatchOperationStatusSucceeded', 'PatchOperationStatusCompletedWithWarnings'
- Status PatchOperationStatus `json:"status,omitempty"`
- // InstallationActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs.
- InstallationActivityID *string `json:"installationActivityId,omitempty"`
- // RebootStatus - READ-ONLY; The reboot state of the VM following completion of the operation. Possible values include: 'VMGuestPatchRebootStatusUnknown', 'VMGuestPatchRebootStatusNotNeeded', 'VMGuestPatchRebootStatusRequired', 'VMGuestPatchRebootStatusStarted', 'VMGuestPatchRebootStatusFailed', 'VMGuestPatchRebootStatusCompleted'
- RebootStatus VMGuestPatchRebootStatus `json:"rebootStatus,omitempty"`
- // MaintenanceWindowExceeded - READ-ONLY; Whether the operation ran out of time before it completed all its intended actions.
- MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty"`
- // ExcludedPatchCount - READ-ONLY; The number of patches that were not installed due to the user blocking their installation.
- ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty"`
- // NotSelectedPatchCount - READ-ONLY; The number of patches that were detected as available for install, but did not meet the operation's criteria.
- NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty"`
- // PendingPatchCount - READ-ONLY; The number of patches that were identified as meeting the installation criteria, but were not able to be installed. Typically this happens when maintenanceWindowExceeded == true.
- PendingPatchCount *int32 `json:"pendingPatchCount,omitempty"`
- // InstalledPatchCount - READ-ONLY; The number of patches successfully installed.
- InstalledPatchCount *int32 `json:"installedPatchCount,omitempty"`
- // FailedPatchCount - READ-ONLY; The number of patches that could not be installed due to some issue. See errors for details.
- FailedPatchCount *int32 `json:"failedPatchCount,omitempty"`
- // Patches - READ-ONLY; The patches that were installed during the operation.
- Patches *[]PatchInstallationDetail `json:"patches,omitempty"`
- // StartDateTime - READ-ONLY; The UTC timestamp when the operation began.
- StartDateTime *date.Time `json:"startDateTime,omitempty"`
- // Error - READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them.
- Error *APIError `json:"error,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineInstallPatchesResult.
-func (vmipr VirtualMachineInstallPatchesResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineInstanceView the instance view of a virtual machine.
-type VirtualMachineInstanceView struct {
- autorest.Response `json:"-"`
- // PlatformUpdateDomain - Specifies the update domain of the virtual machine.
- PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"`
- // PlatformFaultDomain - Specifies the fault domain of the virtual machine.
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // ComputerName - The computer name assigned to the virtual machine.
- ComputerName *string `json:"computerName,omitempty"`
- // OsName - The Operating System running on the virtual machine.
- OsName *string `json:"osName,omitempty"`
- // OsVersion - The version of Operating System running on the virtual machine.
- OsVersion *string `json:"osVersion,omitempty"`
- // HyperVGeneration - Specifies the HyperVGeneration Type associated with a resource. Possible values include: 'HyperVGenerationTypeV1', 'HyperVGenerationTypeV2'
- HyperVGeneration HyperVGenerationType `json:"hyperVGeneration,omitempty"`
- // RdpThumbPrint - The Remote desktop certificate thumbprint.
- RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"`
- // VMAgent - The VM Agent running on the virtual machine.
- VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"`
- // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine.
- MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"`
- // Disks - The virtual machine disk information.
- Disks *[]DiskInstanceView `json:"disks,omitempty"`
- // Extensions - The extensions information.
- Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
- // VMHealth - READ-ONLY; The health status for the VM.
- VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"`
- // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. You can easily view the output of your console log. Azure also enables you to see a screenshot of the VM from the hypervisor.
- BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"`
- // AssignedHost - READ-ONLY; Resource id of the dedicated host, on which the virtual machine is allocated through automatic placement, when the virtual machine is associated with a dedicated host group that has automatic placement enabled. Minimum api-version: 2020-06-01.
- AssignedHost *string `json:"assignedHost,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
- // PatchStatus - [Preview Feature] The status of virtual machine patch operations.
- PatchStatus *VirtualMachinePatchStatus `json:"patchStatus,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineInstanceView.
-func (vmiv VirtualMachineInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmiv.PlatformUpdateDomain != nil {
- objectMap["platformUpdateDomain"] = vmiv.PlatformUpdateDomain
- }
- if vmiv.PlatformFaultDomain != nil {
- objectMap["platformFaultDomain"] = vmiv.PlatformFaultDomain
- }
- if vmiv.ComputerName != nil {
- objectMap["computerName"] = vmiv.ComputerName
- }
- if vmiv.OsName != nil {
- objectMap["osName"] = vmiv.OsName
- }
- if vmiv.OsVersion != nil {
- objectMap["osVersion"] = vmiv.OsVersion
- }
- if vmiv.HyperVGeneration != "" {
- objectMap["hyperVGeneration"] = vmiv.HyperVGeneration
- }
- if vmiv.RdpThumbPrint != nil {
- objectMap["rdpThumbPrint"] = vmiv.RdpThumbPrint
- }
- if vmiv.VMAgent != nil {
- objectMap["vmAgent"] = vmiv.VMAgent
- }
- if vmiv.MaintenanceRedeployStatus != nil {
- objectMap["maintenanceRedeployStatus"] = vmiv.MaintenanceRedeployStatus
- }
- if vmiv.Disks != nil {
- objectMap["disks"] = vmiv.Disks
- }
- if vmiv.Extensions != nil {
- objectMap["extensions"] = vmiv.Extensions
- }
- if vmiv.BootDiagnostics != nil {
- objectMap["bootDiagnostics"] = vmiv.BootDiagnostics
- }
- if vmiv.Statuses != nil {
- objectMap["statuses"] = vmiv.Statuses
- }
- if vmiv.PatchStatus != nil {
- objectMap["patchStatus"] = vmiv.PatchStatus
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineIPTag contains the IP tag associated with the public IP address.
-type VirtualMachineIPTag struct {
- // IPTagType - IP tag type. Example: FirstPartyUsage.
- IPTagType *string `json:"ipTagType,omitempty"`
- // Tag - IP tag associated with the public IP. Example: SQL, Storage etc.
- Tag *string `json:"tag,omitempty"`
-}
-
-// VirtualMachineListResult the List Virtual Machine operation response.
-type VirtualMachineListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machines.
- Value *[]VirtualMachine `json:"value,omitempty"`
- // NextLink - The URI to fetch the next page of VMs. Call ListNext() with this URI to fetch the next page of Virtual Machines.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineListResultIterator provides access to a complete listing of VirtualMachine values.
-type VirtualMachineListResultIterator struct {
- i int
- page VirtualMachineListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineListResultIterator) Response() VirtualMachineListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineListResultIterator) Value() VirtualMachine {
- if !iter.page.NotDone() {
- return VirtualMachine{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineListResultIterator type.
-func NewVirtualMachineListResultIterator(page VirtualMachineListResultPage) VirtualMachineListResultIterator {
- return VirtualMachineListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmlr VirtualMachineListResult) IsEmpty() bool {
- return vmlr.Value == nil || len(*vmlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmlr VirtualMachineListResult) hasNextLink() bool {
- return vmlr.NextLink != nil && len(*vmlr.NextLink) != 0
-}
-
-// virtualMachineListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmlr VirtualMachineListResult) virtualMachineListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmlr.NextLink)))
-}
-
-// VirtualMachineListResultPage contains a page of VirtualMachine values.
-type VirtualMachineListResultPage struct {
- fn func(context.Context, VirtualMachineListResult) (VirtualMachineListResult, error)
- vmlr VirtualMachineListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmlr)
- if err != nil {
- return err
- }
- page.vmlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineListResultPage) NotDone() bool {
- return !page.vmlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineListResultPage) Response() VirtualMachineListResult {
- return page.vmlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineListResultPage) Values() []VirtualMachine {
- if page.vmlr.IsEmpty() {
- return nil
- }
- return *page.vmlr.Value
-}
-
-// Creates a new instance of the VirtualMachineListResultPage type.
-func NewVirtualMachineListResultPage(cur VirtualMachineListResult, getNextPage func(context.Context, VirtualMachineListResult) (VirtualMachineListResult, error)) VirtualMachineListResultPage {
- return VirtualMachineListResultPage{
- fn: getNextPage,
- vmlr: cur,
- }
-}
-
-// VirtualMachineNetworkInterfaceConfiguration describes a virtual machine network interface
-// configurations.
-type VirtualMachineNetworkInterfaceConfiguration struct {
- // Name - The network interface configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineNetworkInterfaceConfigurationProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineNetworkInterfaceConfiguration.
-func (vmnic VirtualMachineNetworkInterfaceConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmnic.Name != nil {
- objectMap["name"] = vmnic.Name
- }
- if vmnic.VirtualMachineNetworkInterfaceConfigurationProperties != nil {
- objectMap["properties"] = vmnic.VirtualMachineNetworkInterfaceConfigurationProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineNetworkInterfaceConfiguration struct.
-func (vmnic *VirtualMachineNetworkInterfaceConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmnic.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineNetworkInterfaceConfigurationProperties VirtualMachineNetworkInterfaceConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineNetworkInterfaceConfigurationProperties)
- if err != nil {
- return err
- }
- vmnic.VirtualMachineNetworkInterfaceConfigurationProperties = &virtualMachineNetworkInterfaceConfigurationProperties
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineNetworkInterfaceConfigurationProperties describes a virtual machine network profile's IP
-// configuration.
-type VirtualMachineNetworkInterfaceConfigurationProperties struct {
- // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
- Primary *bool `json:"primary,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
- // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled.
- EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
- // EnableFpga - Specifies whether the network interface is FPGA networking-enabled.
- EnableFpga *bool `json:"enableFpga,omitempty"`
- // EnableIPForwarding - Whether IP forwarding enabled on this NIC.
- EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
- // NetworkSecurityGroup - The network security group.
- NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
- // DNSSettings - The dns settings to be applied on the network interfaces.
- DNSSettings *VirtualMachineNetworkInterfaceDNSSettingsConfiguration `json:"dnsSettings,omitempty"`
- // IPConfigurations - Specifies the IP configurations of the network interface.
- IPConfigurations *[]VirtualMachineNetworkInterfaceIPConfiguration `json:"ipConfigurations,omitempty"`
- DscpConfiguration *SubResource `json:"dscpConfiguration,omitempty"`
-}
-
-// VirtualMachineNetworkInterfaceDNSSettingsConfiguration describes a virtual machines network
-// configuration's DNS settings.
-type VirtualMachineNetworkInterfaceDNSSettingsConfiguration struct {
- // DNSServers - List of DNS servers IP addresses
- DNSServers *[]string `json:"dnsServers,omitempty"`
-}
-
-// VirtualMachineNetworkInterfaceIPConfiguration describes a virtual machine network profile's IP
-// configuration.
-type VirtualMachineNetworkInterfaceIPConfiguration struct {
- // Name - The IP configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineNetworkInterfaceIPConfigurationProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineNetworkInterfaceIPConfiguration.
-func (vmniic VirtualMachineNetworkInterfaceIPConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmniic.Name != nil {
- objectMap["name"] = vmniic.Name
- }
- if vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties != nil {
- objectMap["properties"] = vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineNetworkInterfaceIPConfiguration struct.
-func (vmniic *VirtualMachineNetworkInterfaceIPConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmniic.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineNetworkInterfaceIPConfigurationProperties VirtualMachineNetworkInterfaceIPConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineNetworkInterfaceIPConfigurationProperties)
- if err != nil {
- return err
- }
- vmniic.VirtualMachineNetworkInterfaceIPConfigurationProperties = &virtualMachineNetworkInterfaceIPConfigurationProperties
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineNetworkInterfaceIPConfigurationProperties describes a virtual machine network interface IP
-// configuration properties.
-type VirtualMachineNetworkInterfaceIPConfigurationProperties struct {
- // Subnet - Specifies the identifier of the subnet.
- Subnet *SubResource `json:"subnet,omitempty"`
- // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
- Primary *bool `json:"primary,omitempty"`
- // PublicIPAddressConfiguration - The publicIPAddressConfiguration.
- PublicIPAddressConfiguration *VirtualMachinePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
- // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionsIPv4', 'IPVersionsIPv6'
- PrivateIPAddressVersion IPVersions `json:"privateIPAddressVersion,omitempty"`
- // ApplicationSecurityGroups - Specifies an array of references to application security group.
- ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"`
- // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A virtual machine can reference backend address pools of multiple application gateways. Multiple virtual machines cannot use the same application gateway.
- ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
- // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A virtual machine can reference backend address pools of one public and one internal load balancer. [Multiple virtual machines cannot use the same basic sku load balancer].
- LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"`
-}
-
-// VirtualMachinePatchStatus the status of virtual machine patch operations.
-type VirtualMachinePatchStatus struct {
- // AvailablePatchSummary - The available patch summary of the latest assessment operation for the virtual machine.
- AvailablePatchSummary *AvailablePatchSummary `json:"availablePatchSummary,omitempty"`
- // LastPatchInstallationSummary - The installation summary of the latest installation operation for the virtual machine.
- LastPatchInstallationSummary *LastPatchInstallationSummary `json:"lastPatchInstallationSummary,omitempty"`
- // ConfigurationStatuses - READ-ONLY; The enablement status of the specified patchMode
- ConfigurationStatuses *[]InstanceViewStatus `json:"configurationStatuses,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachinePatchStatus.
-func (vmps VirtualMachinePatchStatus) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmps.AvailablePatchSummary != nil {
- objectMap["availablePatchSummary"] = vmps.AvailablePatchSummary
- }
- if vmps.LastPatchInstallationSummary != nil {
- objectMap["lastPatchInstallationSummary"] = vmps.LastPatchInstallationSummary
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineProperties describes the properties of a Virtual Machine.
-type VirtualMachineProperties struct {
- // HardwareProfile - Specifies the hardware settings for the virtual machine.
- HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
- // StorageProfile - Specifies the storage settings for the virtual machine disks.
- StorageProfile *StorageProfile `json:"storageProfile,omitempty"`
- // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine.
- AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
- // OsProfile - Specifies the operating system settings used while creating the virtual machine. Some of the settings cannot be changed once VM is provisioned.
- OsProfile *OSProfile `json:"osProfile,omitempty"`
- // NetworkProfile - Specifies the network interfaces of the virtual machine.
- NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
- // SecurityProfile - Specifies the Security related profile settings for the virtual machine.
- SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
- // DiagnosticsProfile - Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Availability sets overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview). For more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates) Currently, a VM can only be added to availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set. This property cannot exist along with a non-null properties.virtualMachineScaleSet reference.
- AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
- // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set. This property cannot exist along with a non-null properties.availabilitySet reference. Minimum api‐version: 2019‐03‐01
- VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"`
- // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to. Minimum api-version: 2018-04-01.
- ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
- // Priority - Specifies the priority for the virtual machine. Minimum api-version: 2019-03-01. Possible values include: 'VirtualMachinePriorityTypesRegular', 'VirtualMachinePriorityTypesLow', 'VirtualMachinePriorityTypesSpot'
- Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
- // EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'VirtualMachineEvictionPolicyTypesDeallocate', 'VirtualMachineEvictionPolicyTypesDelete'
- EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
- // BillingProfile - Specifies the billing related details of a Azure Spot virtual machine. Minimum api-version: 2019-03-01.
- BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
- // Host - Specifies information about the dedicated host that the virtual machine resides in. Minimum api-version: 2018-10-01.
- Host *SubResource `json:"host,omitempty"`
- // HostGroup - Specifies information about the dedicated host group that the virtual machine resides in. Minimum api-version: 2020-06-01. NOTE: User cannot specify both host and hostGroup properties.
- HostGroup *SubResource `json:"hostGroup,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // InstanceView - READ-ONLY; The virtual machine instance view.
- InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"`
- // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. Possible values for Windows Server operating system are: Windows_Client Windows_Server Possible values for Linux Server operating system are: RHEL_BYOS (for RHEL) SLES_BYOS (for SUSE) For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing) [Azure Hybrid Use Benefit for Linux Server](https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux) Minimum api-version: 2015-06-15
- LicenseType *string `json:"licenseType,omitempty"`
- // VMID - READ-ONLY; Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands.
- VMID *string `json:"vmId,omitempty"`
- // ExtensionsTimeBudget - Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. The default value is 90 minutes (PT1H30M). Minimum api-version: 2020-06-01
- ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty"`
- // PlatformFaultDomain - Specifies the scale set logical fault domain into which the Virtual Machine will be created. By default, the Virtual Machine will by automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the 'virtualMachineScaleSet' property of this Virtual Machine is set.The Virtual Machine Scale Set that is referenced, must have 'platformFaultDomainCount' > 1.This property cannot be updated once the Virtual Machine is created.Fault domain assignment can be viewed in the Virtual Machine Instance View. Minimum api‐version: 2020‐12‐01
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
- ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
- // UserData - UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here. Minimum api-version: 2021-03-01
- UserData *string `json:"userData,omitempty"`
- // CapacityReservation - Specifies information about the capacity reservation that is used to allocate virtual machine. Minimum api-version: 2021-04-01.
- CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"`
- // ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS
- ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineProperties.
-func (vmp VirtualMachineProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmp.HardwareProfile != nil {
- objectMap["hardwareProfile"] = vmp.HardwareProfile
- }
- if vmp.StorageProfile != nil {
- objectMap["storageProfile"] = vmp.StorageProfile
- }
- if vmp.AdditionalCapabilities != nil {
- objectMap["additionalCapabilities"] = vmp.AdditionalCapabilities
- }
- if vmp.OsProfile != nil {
- objectMap["osProfile"] = vmp.OsProfile
- }
- if vmp.NetworkProfile != nil {
- objectMap["networkProfile"] = vmp.NetworkProfile
- }
- if vmp.SecurityProfile != nil {
- objectMap["securityProfile"] = vmp.SecurityProfile
- }
- if vmp.DiagnosticsProfile != nil {
- objectMap["diagnosticsProfile"] = vmp.DiagnosticsProfile
- }
- if vmp.AvailabilitySet != nil {
- objectMap["availabilitySet"] = vmp.AvailabilitySet
- }
- if vmp.VirtualMachineScaleSet != nil {
- objectMap["virtualMachineScaleSet"] = vmp.VirtualMachineScaleSet
- }
- if vmp.ProximityPlacementGroup != nil {
- objectMap["proximityPlacementGroup"] = vmp.ProximityPlacementGroup
- }
- if vmp.Priority != "" {
- objectMap["priority"] = vmp.Priority
- }
- if vmp.EvictionPolicy != "" {
- objectMap["evictionPolicy"] = vmp.EvictionPolicy
- }
- if vmp.BillingProfile != nil {
- objectMap["billingProfile"] = vmp.BillingProfile
- }
- if vmp.Host != nil {
- objectMap["host"] = vmp.Host
- }
- if vmp.HostGroup != nil {
- objectMap["hostGroup"] = vmp.HostGroup
- }
- if vmp.LicenseType != nil {
- objectMap["licenseType"] = vmp.LicenseType
- }
- if vmp.ExtensionsTimeBudget != nil {
- objectMap["extensionsTimeBudget"] = vmp.ExtensionsTimeBudget
- }
- if vmp.PlatformFaultDomain != nil {
- objectMap["platformFaultDomain"] = vmp.PlatformFaultDomain
- }
- if vmp.ScheduledEventsProfile != nil {
- objectMap["scheduledEventsProfile"] = vmp.ScheduledEventsProfile
- }
- if vmp.UserData != nil {
- objectMap["userData"] = vmp.UserData
- }
- if vmp.CapacityReservation != nil {
- objectMap["capacityReservation"] = vmp.CapacityReservation
- }
- if vmp.ApplicationProfile != nil {
- objectMap["applicationProfile"] = vmp.ApplicationProfile
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachinePublicIPAddressConfiguration describes a virtual machines IP Configuration's
-// PublicIPAddress configuration
-type VirtualMachinePublicIPAddressConfiguration struct {
- // Name - The publicIP address configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachinePublicIPAddressConfigurationProperties `json:"properties,omitempty"`
- Sku *PublicIPAddressSku `json:"sku,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachinePublicIPAddressConfiguration.
-func (vmpiac VirtualMachinePublicIPAddressConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmpiac.Name != nil {
- objectMap["name"] = vmpiac.Name
- }
- if vmpiac.VirtualMachinePublicIPAddressConfigurationProperties != nil {
- objectMap["properties"] = vmpiac.VirtualMachinePublicIPAddressConfigurationProperties
- }
- if vmpiac.Sku != nil {
- objectMap["sku"] = vmpiac.Sku
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachinePublicIPAddressConfiguration struct.
-func (vmpiac *VirtualMachinePublicIPAddressConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmpiac.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachinePublicIPAddressConfigurationProperties VirtualMachinePublicIPAddressConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachinePublicIPAddressConfigurationProperties)
- if err != nil {
- return err
- }
- vmpiac.VirtualMachinePublicIPAddressConfigurationProperties = &virtualMachinePublicIPAddressConfigurationProperties
- }
- case "sku":
- if v != nil {
- var sku PublicIPAddressSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- vmpiac.Sku = &sku
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachinePublicIPAddressConfigurationProperties describes a virtual machines IP Configuration's
-// PublicIPAddress configuration
-type VirtualMachinePublicIPAddressConfigurationProperties struct {
- // IdleTimeoutInMinutes - The idle timeout of the public IP address.
- IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
- // DeleteOption - Specify what happens to the public IP address when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
- // DNSSettings - The dns settings to be applied on the publicIP addresses .
- DNSSettings *VirtualMachinePublicIPAddressDNSSettingsConfiguration `json:"dnsSettings,omitempty"`
- // IPTags - The list of IP tags associated with the public IP address.
- IPTags *[]VirtualMachineIPTag `json:"ipTags,omitempty"`
- // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses.
- PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
- // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionsIPv4', 'IPVersionsIPv6'
- PublicIPAddressVersion IPVersions `json:"publicIPAddressVersion,omitempty"`
- // PublicIPAllocationMethod - Specify the public IP allocation type. Possible values include: 'PublicIPAllocationMethodDynamic', 'PublicIPAllocationMethodStatic'
- PublicIPAllocationMethod PublicIPAllocationMethod `json:"publicIPAllocationMethod,omitempty"`
-}
-
-// VirtualMachinePublicIPAddressDNSSettingsConfiguration describes a virtual machines network
-// configuration's DNS settings.
-type VirtualMachinePublicIPAddressDNSSettingsConfiguration struct {
- // DomainNameLabel - The Domain name label prefix of the PublicIPAddress resources that will be created. The generated name label is the concatenation of the domain name label and vm network profile unique ID.
- DomainNameLabel *string `json:"domainNameLabel,omitempty"`
-}
-
-// VirtualMachineReimageParameters parameters for Reimaging Virtual Machine. NOTE: Virtual Machine OS disk
-// will always be reimaged
-type VirtualMachineReimageParameters struct {
- // TempDisk - Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
- TempDisk *bool `json:"tempDisk,omitempty"`
-}
-
-// VirtualMachineRunCommand describes a Virtual Machine run command.
-type VirtualMachineRunCommand struct {
- autorest.Response `json:"-"`
- *VirtualMachineRunCommandProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineRunCommand.
-func (vmrc VirtualMachineRunCommand) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmrc.VirtualMachineRunCommandProperties != nil {
- objectMap["properties"] = vmrc.VirtualMachineRunCommandProperties
- }
- if vmrc.Location != nil {
- objectMap["location"] = vmrc.Location
- }
- if vmrc.Tags != nil {
- objectMap["tags"] = vmrc.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineRunCommand struct.
-func (vmrc *VirtualMachineRunCommand) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineRunCommandProperties VirtualMachineRunCommandProperties
- err = json.Unmarshal(*v, &virtualMachineRunCommandProperties)
- if err != nil {
- return err
- }
- vmrc.VirtualMachineRunCommandProperties = &virtualMachineRunCommandProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmrc.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmrc.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmrc.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vmrc.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmrc.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineRunCommandInstanceView the instance view of a virtual machine run command.
-type VirtualMachineRunCommandInstanceView struct {
- // ExecutionState - Script execution status. Possible values include: 'ExecutionStateUnknown', 'ExecutionStatePending', 'ExecutionStateRunning', 'ExecutionStateFailed', 'ExecutionStateSucceeded', 'ExecutionStateTimedOut', 'ExecutionStateCanceled'
- ExecutionState ExecutionState `json:"executionState,omitempty"`
- // ExecutionMessage - Communicate script configuration errors or execution messages.
- ExecutionMessage *string `json:"executionMessage,omitempty"`
- // ExitCode - Exit code returned from script execution.
- ExitCode *int32 `json:"exitCode,omitempty"`
- // Output - Script output stream.
- Output *string `json:"output,omitempty"`
- // Error - Script error stream.
- Error *string `json:"error,omitempty"`
- // StartTime - Script start time.
- StartTime *date.Time `json:"startTime,omitempty"`
- // EndTime - Script end time.
- EndTime *date.Time `json:"endTime,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
-}
-
-// VirtualMachineRunCommandProperties describes the properties of a Virtual Machine run command.
-type VirtualMachineRunCommandProperties struct {
- // Source - The source of the run command script.
- Source *VirtualMachineRunCommandScriptSource `json:"source,omitempty"`
- // Parameters - The parameters used by the script.
- Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"`
- // ProtectedParameters - The parameters used by the script.
- ProtectedParameters *[]RunCommandInputParameter `json:"protectedParameters,omitempty"`
- // AsyncExecution - Optional. If set to true, provisioning will complete as soon as the script starts and will not wait for script to complete.
- AsyncExecution *bool `json:"asyncExecution,omitempty"`
- // RunAsUser - Specifies the user account on the VM when executing the run command.
- RunAsUser *string `json:"runAsUser,omitempty"`
- // RunAsPassword - Specifies the user account password on the VM when executing the run command.
- RunAsPassword *string `json:"runAsPassword,omitempty"`
- // TimeoutInSeconds - The timeout in seconds to execute the run command.
- TimeoutInSeconds *int32 `json:"timeoutInSeconds,omitempty"`
- // OutputBlobURI - Specifies the Azure storage blob where script output stream will be uploaded.
- OutputBlobURI *string `json:"outputBlobUri,omitempty"`
- // ErrorBlobURI - Specifies the Azure storage blob where script error stream will be uploaded.
- ErrorBlobURI *string `json:"errorBlobUri,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // InstanceView - READ-ONLY; The virtual machine run command instance view.
- InstanceView *VirtualMachineRunCommandInstanceView `json:"instanceView,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineRunCommandProperties.
-func (vmrcp VirtualMachineRunCommandProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmrcp.Source != nil {
- objectMap["source"] = vmrcp.Source
- }
- if vmrcp.Parameters != nil {
- objectMap["parameters"] = vmrcp.Parameters
- }
- if vmrcp.ProtectedParameters != nil {
- objectMap["protectedParameters"] = vmrcp.ProtectedParameters
- }
- if vmrcp.AsyncExecution != nil {
- objectMap["asyncExecution"] = vmrcp.AsyncExecution
- }
- if vmrcp.RunAsUser != nil {
- objectMap["runAsUser"] = vmrcp.RunAsUser
- }
- if vmrcp.RunAsPassword != nil {
- objectMap["runAsPassword"] = vmrcp.RunAsPassword
- }
- if vmrcp.TimeoutInSeconds != nil {
- objectMap["timeoutInSeconds"] = vmrcp.TimeoutInSeconds
- }
- if vmrcp.OutputBlobURI != nil {
- objectMap["outputBlobUri"] = vmrcp.OutputBlobURI
- }
- if vmrcp.ErrorBlobURI != nil {
- objectMap["errorBlobUri"] = vmrcp.ErrorBlobURI
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineRunCommandsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineRunCommandsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineRunCommandsClient) (VirtualMachineRunCommand, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineRunCommandsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineRunCommandsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineRunCommandsCreateOrUpdateFuture) result(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmrc.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent {
- vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineRunCommandScriptSource describes the script sources for run command.
-type VirtualMachineRunCommandScriptSource struct {
- // Script - Specifies the script content to be executed on the VM.
- Script *string `json:"script,omitempty"`
- // ScriptURI - Specifies the script download location.
- ScriptURI *string `json:"scriptUri,omitempty"`
- // CommandID - Specifies a commandId of predefined built-in script.
- CommandID *string `json:"commandId,omitempty"`
-}
-
-// VirtualMachineRunCommandsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineRunCommandsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineRunCommandsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineRunCommandsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineRunCommandsDeleteFuture.Result.
-func (future *VirtualMachineRunCommandsDeleteFuture) result(client VirtualMachineRunCommandsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineRunCommandsListResult the List run command operation response
-type VirtualMachineRunCommandsListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of run commands
- Value *[]VirtualMachineRunCommand `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of run commands.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineRunCommandsListResultIterator provides access to a complete listing of
-// VirtualMachineRunCommand values.
-type VirtualMachineRunCommandsListResultIterator struct {
- i int
- page VirtualMachineRunCommandsListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineRunCommandsListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineRunCommandsListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineRunCommandsListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineRunCommandsListResultIterator) Response() VirtualMachineRunCommandsListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineRunCommandsListResultIterator) Value() VirtualMachineRunCommand {
- if !iter.page.NotDone() {
- return VirtualMachineRunCommand{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineRunCommandsListResultIterator type.
-func NewVirtualMachineRunCommandsListResultIterator(page VirtualMachineRunCommandsListResultPage) VirtualMachineRunCommandsListResultIterator {
- return VirtualMachineRunCommandsListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmrclr VirtualMachineRunCommandsListResult) IsEmpty() bool {
- return vmrclr.Value == nil || len(*vmrclr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmrclr VirtualMachineRunCommandsListResult) hasNextLink() bool {
- return vmrclr.NextLink != nil && len(*vmrclr.NextLink) != 0
-}
-
-// virtualMachineRunCommandsListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmrclr VirtualMachineRunCommandsListResult) virtualMachineRunCommandsListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmrclr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmrclr.NextLink)))
-}
-
-// VirtualMachineRunCommandsListResultPage contains a page of VirtualMachineRunCommand values.
-type VirtualMachineRunCommandsListResultPage struct {
- fn func(context.Context, VirtualMachineRunCommandsListResult) (VirtualMachineRunCommandsListResult, error)
- vmrclr VirtualMachineRunCommandsListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineRunCommandsListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmrclr)
- if err != nil {
- return err
- }
- page.vmrclr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineRunCommandsListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineRunCommandsListResultPage) NotDone() bool {
- return !page.vmrclr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineRunCommandsListResultPage) Response() VirtualMachineRunCommandsListResult {
- return page.vmrclr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineRunCommandsListResultPage) Values() []VirtualMachineRunCommand {
- if page.vmrclr.IsEmpty() {
- return nil
- }
- return *page.vmrclr.Value
-}
-
-// Creates a new instance of the VirtualMachineRunCommandsListResultPage type.
-func NewVirtualMachineRunCommandsListResultPage(cur VirtualMachineRunCommandsListResult, getNextPage func(context.Context, VirtualMachineRunCommandsListResult) (VirtualMachineRunCommandsListResult, error)) VirtualMachineRunCommandsListResultPage {
- return VirtualMachineRunCommandsListResultPage{
- fn: getNextPage,
- vmrclr: cur,
- }
-}
-
-// VirtualMachineRunCommandsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineRunCommandsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineRunCommandsClient) (VirtualMachineRunCommand, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineRunCommandsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineRunCommandsUpdateFuture.Result.
-func (future *VirtualMachineRunCommandsUpdateFuture) result(client VirtualMachineRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmrc.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineRunCommandsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent {
- vmrc, err = client.UpdateResponder(vmrc.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineRunCommandUpdate describes a Virtual Machine run command.
-type VirtualMachineRunCommandUpdate struct {
- *VirtualMachineRunCommandProperties `json:"properties,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineRunCommandUpdate.
-func (vmrcu VirtualMachineRunCommandUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmrcu.VirtualMachineRunCommandProperties != nil {
- objectMap["properties"] = vmrcu.VirtualMachineRunCommandProperties
- }
- if vmrcu.Tags != nil {
- objectMap["tags"] = vmrcu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineRunCommandUpdate struct.
-func (vmrcu *VirtualMachineRunCommandUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var virtualMachineRunCommandProperties VirtualMachineRunCommandProperties
- err = json.Unmarshal(*v, &virtualMachineRunCommandProperties)
- if err != nil {
- return err
- }
- vmrcu.VirtualMachineRunCommandProperties = &virtualMachineRunCommandProperties
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmrcu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachinesAssessPatchesFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesAssessPatchesFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (VirtualMachineAssessPatchesResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesAssessPatchesFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesAssessPatchesFuture.Result.
-func (future *VirtualMachinesAssessPatchesFuture) result(client VirtualMachinesClient) (vmapr VirtualMachineAssessPatchesResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmapr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesAssessPatchesFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmapr.Response.Response, err = future.GetResult(sender); err == nil && vmapr.Response.Response.StatusCode != http.StatusNoContent {
- vmapr, err = client.AssessPatchesResponder(vmapr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesAssessPatchesFuture", "Result", vmapr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSet describes a Virtual Machine Scale Set.
-type VirtualMachineScaleSet struct {
- autorest.Response `json:"-"`
- // Sku - The virtual machine scale set sku.
- Sku *Sku `json:"sku,omitempty"`
- // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
- Plan *Plan `json:"plan,omitempty"`
- *VirtualMachineScaleSetProperties `json:"properties,omitempty"`
- // Identity - The identity of the virtual machine scale set, if configured.
- Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"`
- // Zones - The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
- Zones *[]string `json:"zones,omitempty"`
- // ExtendedLocation - The extended location of the Virtual Machine Scale Set.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSet.
-func (vmss VirtualMachineScaleSet) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmss.Sku != nil {
- objectMap["sku"] = vmss.Sku
- }
- if vmss.Plan != nil {
- objectMap["plan"] = vmss.Plan
- }
- if vmss.VirtualMachineScaleSetProperties != nil {
- objectMap["properties"] = vmss.VirtualMachineScaleSetProperties
- }
- if vmss.Identity != nil {
- objectMap["identity"] = vmss.Identity
- }
- if vmss.Zones != nil {
- objectMap["zones"] = vmss.Zones
- }
- if vmss.ExtendedLocation != nil {
- objectMap["extendedLocation"] = vmss.ExtendedLocation
- }
- if vmss.Location != nil {
- objectMap["location"] = vmss.Location
- }
- if vmss.Tags != nil {
- objectMap["tags"] = vmss.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSet struct.
-func (vmss *VirtualMachineScaleSet) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- vmss.Sku = &sku
- }
- case "plan":
- if v != nil {
- var plan Plan
- err = json.Unmarshal(*v, &plan)
- if err != nil {
- return err
- }
- vmss.Plan = &plan
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetProperties VirtualMachineScaleSetProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetProperties)
- if err != nil {
- return err
- }
- vmss.VirtualMachineScaleSetProperties = &virtualMachineScaleSetProperties
- }
- case "identity":
- if v != nil {
- var identity VirtualMachineScaleSetIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- vmss.Identity = &identity
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- vmss.Zones = &zones
- }
- case "extendedLocation":
- if v != nil {
- var extendedLocation ExtendedLocation
- err = json.Unmarshal(*v, &extendedLocation)
- if err != nil {
- return err
- }
- vmss.ExtendedLocation = &extendedLocation
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmss.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmss.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmss.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vmss.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmss.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetDataDisk describes a virtual machine scale set data disk.
-type VirtualMachineScaleSetDataDisk struct {
- // Name - The disk name.
- Name *string `json:"name,omitempty"`
- // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- Lun *int32 `json:"lun,omitempty"`
- // Caching - Specifies the caching requirements. Possible values are: **None** **ReadOnly** **ReadWrite** Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // CreateOption - The create option. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
- CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
- // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // ManagedDisk - The managed disk parameters.
- ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
- // DiskIOPSReadWrite - Specifies the Read-Write IOPS for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.
- DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
- // DiskMBpsReadWrite - Specifies the bandwidth in MB per second for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.
- DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
-}
-
-// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension.
-type VirtualMachineScaleSetExtension struct {
- autorest.Response `json:"-"`
- // Name - The name of the extension.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtension.
-func (vmsse VirtualMachineScaleSetExtension) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmsse.Name != nil {
- objectMap["name"] = vmsse.Name
- }
- if vmsse.VirtualMachineScaleSetExtensionProperties != nil {
- objectMap["properties"] = vmsse.VirtualMachineScaleSetExtensionProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetExtension struct.
-func (vmsse *VirtualMachineScaleSetExtension) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmsse.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmsse.Type = &typeVar
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetExtensionProperties)
- if err != nil {
- return err
- }
- vmsse.VirtualMachineScaleSetExtensionProperties = &virtualMachineScaleSetExtensionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmsse.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetExtensionListResult the List VM scale set extension operation response.
-type VirtualMachineScaleSetExtensionListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of VM scale set extensions.
- Value *[]VirtualMachineScaleSetExtension `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of VM scale set extensions. Call ListNext() with this to fetch the next page of VM scale set extensions.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetExtensionListResultIterator provides access to a complete listing of
-// VirtualMachineScaleSetExtension values.
-type VirtualMachineScaleSetExtensionListResultIterator struct {
- i int
- page VirtualMachineScaleSetExtensionListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetExtensionListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetExtensionListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetExtensionListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetExtensionListResultIterator) Response() VirtualMachineScaleSetExtensionListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetExtensionListResultIterator) Value() VirtualMachineScaleSetExtension {
- if !iter.page.NotDone() {
- return VirtualMachineScaleSetExtension{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetExtensionListResultIterator type.
-func NewVirtualMachineScaleSetExtensionListResultIterator(page VirtualMachineScaleSetExtensionListResultPage) VirtualMachineScaleSetExtensionListResultIterator {
- return VirtualMachineScaleSetExtensionListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmsselr VirtualMachineScaleSetExtensionListResult) IsEmpty() bool {
- return vmsselr.Value == nil || len(*vmsselr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmsselr VirtualMachineScaleSetExtensionListResult) hasNextLink() bool {
- return vmsselr.NextLink != nil && len(*vmsselr.NextLink) != 0
-}
-
-// virtualMachineScaleSetExtensionListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmsselr VirtualMachineScaleSetExtensionListResult) virtualMachineScaleSetExtensionListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmsselr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmsselr.NextLink)))
-}
-
-// VirtualMachineScaleSetExtensionListResultPage contains a page of VirtualMachineScaleSetExtension values.
-type VirtualMachineScaleSetExtensionListResultPage struct {
- fn func(context.Context, VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error)
- vmsselr VirtualMachineScaleSetExtensionListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetExtensionListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmsselr)
- if err != nil {
- return err
- }
- page.vmsselr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetExtensionListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetExtensionListResultPage) NotDone() bool {
- return !page.vmsselr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetExtensionListResultPage) Response() VirtualMachineScaleSetExtensionListResult {
- return page.vmsselr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetExtensionListResultPage) Values() []VirtualMachineScaleSetExtension {
- if page.vmsselr.IsEmpty() {
- return nil
- }
- return *page.vmsselr.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetExtensionListResultPage type.
-func NewVirtualMachineScaleSetExtensionListResultPage(cur VirtualMachineScaleSetExtensionListResult, getNextPage func(context.Context, VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error)) VirtualMachineScaleSetExtensionListResultPage {
- return VirtualMachineScaleSetExtensionListResultPage{
- fn: getNextPage,
- vmsselr: cur,
- }
-}
-
-// VirtualMachineScaleSetExtensionProfile describes a virtual machine scale set extension profile.
-type VirtualMachineScaleSetExtensionProfile struct {
- // Extensions - The virtual machine scale set child extension resources.
- Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"`
- // ExtensionsTimeBudget - Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. The default value is 90 minutes (PT1H30M). Minimum api-version: 2020-06-01
- ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty"`
-}
-
-// VirtualMachineScaleSetExtensionProperties describes the properties of a Virtual Machine Scale Set
-// Extension.
-type VirtualMachineScaleSetExtensionProperties struct {
- // ForceUpdateTag - If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
- ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
- // Publisher - The name of the extension handler publisher.
- Publisher *string `json:"publisher,omitempty"`
- // Type - Specifies the type of the extension; an example is "CustomScriptExtension".
- Type *string `json:"type,omitempty"`
- // TypeHandlerVersion - Specifies the version of the script handler.
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
- // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
- AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
- // EnableAutomaticUpgrade - Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
- EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
- // Settings - Json formatted public settings for the extension.
- Settings interface{} `json:"settings,omitempty"`
- // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
- ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // ProvisionAfterExtensions - Collection of extension names after which this extension needs to be provisioned.
- ProvisionAfterExtensions *[]string `json:"provisionAfterExtensions,omitempty"`
- // SuppressFailures - Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
- SuppressFailures *bool `json:"suppressFailures,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtensionProperties.
-func (vmssep VirtualMachineScaleSetExtensionProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssep.ForceUpdateTag != nil {
- objectMap["forceUpdateTag"] = vmssep.ForceUpdateTag
- }
- if vmssep.Publisher != nil {
- objectMap["publisher"] = vmssep.Publisher
- }
- if vmssep.Type != nil {
- objectMap["type"] = vmssep.Type
- }
- if vmssep.TypeHandlerVersion != nil {
- objectMap["typeHandlerVersion"] = vmssep.TypeHandlerVersion
- }
- if vmssep.AutoUpgradeMinorVersion != nil {
- objectMap["autoUpgradeMinorVersion"] = vmssep.AutoUpgradeMinorVersion
- }
- if vmssep.EnableAutomaticUpgrade != nil {
- objectMap["enableAutomaticUpgrade"] = vmssep.EnableAutomaticUpgrade
- }
- if vmssep.Settings != nil {
- objectMap["settings"] = vmssep.Settings
- }
- if vmssep.ProtectedSettings != nil {
- objectMap["protectedSettings"] = vmssep.ProtectedSettings
- }
- if vmssep.ProvisionAfterExtensions != nil {
- objectMap["provisionAfterExtensions"] = vmssep.ProvisionAfterExtensions
- }
- if vmssep.SuppressFailures != nil {
- objectMap["suppressFailures"] = vmssep.SuppressFailures
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetExtensionsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmsse.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent {
- vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineScaleSetExtensionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetExtensionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetExtensionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetExtensionsDeleteFuture.Result.
-func (future *VirtualMachineScaleSetExtensionsDeleteFuture) result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineScaleSetExtensionsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetExtensionsClient) (VirtualMachineScaleSetExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetExtensionsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetExtensionsUpdateFuture.Result.
-func (future *VirtualMachineScaleSetExtensionsUpdateFuture) result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmsse.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent {
- vmsse, err = client.UpdateResponder(vmsse.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetExtensionUpdate describes a Virtual Machine Scale Set Extension.
-type VirtualMachineScaleSetExtensionUpdate struct {
- // Name - READ-ONLY; The name of the extension.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtensionUpdate.
-func (vmsseu VirtualMachineScaleSetExtensionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmsseu.VirtualMachineScaleSetExtensionProperties != nil {
- objectMap["properties"] = vmsseu.VirtualMachineScaleSetExtensionProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetExtensionUpdate struct.
-func (vmsseu *VirtualMachineScaleSetExtensionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmsseu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmsseu.Type = &typeVar
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetExtensionProperties)
- if err != nil {
- return err
- }
- vmsseu.VirtualMachineScaleSetExtensionProperties = &virtualMachineScaleSetExtensionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmsseu.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetIdentity identity for the virtual machine scale set.
-type VirtualMachineScaleSetIdentity struct {
- // PrincipalID - READ-ONLY; The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // TenantID - READ-ONLY; The tenant id associated with the virtual machine scale set. This property will only be provided for a system assigned identity.
- TenantID *string `json:"tenantId,omitempty"`
- // Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone'
- Type ResourceIdentityType `json:"type,omitempty"`
- // UserAssignedIdentities - The list of user identities associated with the virtual machine scale set. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
- UserAssignedIdentities map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentity.
-func (vmssi VirtualMachineScaleSetIdentity) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssi.Type != "" {
- objectMap["type"] = vmssi.Type
- }
- if vmssi.UserAssignedIdentities != nil {
- objectMap["userAssignedIdentities"] = vmssi.UserAssignedIdentities
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue ...
-type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct {
- // PrincipalID - READ-ONLY; The principal id of user assigned identity.
- PrincipalID *string `json:"principalId,omitempty"`
- // ClientID - READ-ONLY; The client id of user assigned identity.
- ClientID *string `json:"clientId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue.
-func (vmssiAiv VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set.
-type VirtualMachineScaleSetInstanceView struct {
- autorest.Response `json:"-"`
- // VirtualMachine - READ-ONLY; The instance view status summary for the virtual machine scale set.
- VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"`
- // Extensions - READ-ONLY; The extensions information.
- Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
- // OrchestrationServices - READ-ONLY; The orchestration services information.
- OrchestrationServices *[]OrchestrationServiceSummary `json:"orchestrationServices,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetInstanceView.
-func (vmssiv VirtualMachineScaleSetInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssiv.Statuses != nil {
- objectMap["statuses"] = vmssiv.Statuses
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetInstanceViewStatusesSummary instance view statuses summary for virtual machines of
-// a virtual machine scale set.
-type VirtualMachineScaleSetInstanceViewStatusesSummary struct {
- // StatusesSummary - READ-ONLY; The extensions information.
- StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetInstanceViewStatusesSummary.
-func (vmssivss VirtualMachineScaleSetInstanceViewStatusesSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale set network profile's IP
-// configuration.
-type VirtualMachineScaleSetIPConfiguration struct {
- // Name - The IP configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIPConfiguration.
-func (vmssic VirtualMachineScaleSetIPConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssic.Name != nil {
- objectMap["name"] = vmssic.Name
- }
- if vmssic.VirtualMachineScaleSetIPConfigurationProperties != nil {
- objectMap["properties"] = vmssic.VirtualMachineScaleSetIPConfigurationProperties
- }
- if vmssic.ID != nil {
- objectMap["id"] = vmssic.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetIPConfiguration struct.
-func (vmssic *VirtualMachineScaleSetIPConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssic.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetIPConfigurationProperties VirtualMachineScaleSetIPConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetIPConfigurationProperties)
- if err != nil {
- return err
- }
- vmssic.VirtualMachineScaleSetIPConfigurationProperties = &virtualMachineScaleSetIPConfigurationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssic.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetIPConfigurationProperties describes a virtual machine scale set network profile's
-// IP configuration properties.
-type VirtualMachineScaleSetIPConfigurationProperties struct {
- // Subnet - Specifies the identifier of the subnet.
- Subnet *APIEntityReference `json:"subnet,omitempty"`
- // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
- Primary *bool `json:"primary,omitempty"`
- // PublicIPAddressConfiguration - The publicIPAddressConfiguration.
- PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
- // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
- PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"`
- // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway.
- ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
- // ApplicationSecurityGroups - Specifies an array of references to application security group.
- ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"`
- // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same basic sku load balancer.
- LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"`
- // LoadBalancerInboundNatPools - Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. Multiple scale sets cannot use the same basic sku load balancer.
- LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"`
-}
-
-// VirtualMachineScaleSetIPTag contains the IP tag associated with the public IP address.
-type VirtualMachineScaleSetIPTag struct {
- // IPTagType - IP tag type. Example: FirstPartyUsage.
- IPTagType *string `json:"ipTagType,omitempty"`
- // Tag - IP tag associated with the public IP. Example: SQL, Storage etc.
- Tag *string `json:"tag,omitempty"`
-}
-
-// VirtualMachineScaleSetListOSUpgradeHistory list of Virtual Machine Scale Set OS Upgrade History
-// operation response.
-type VirtualMachineScaleSetListOSUpgradeHistory struct {
- autorest.Response `json:"-"`
- // Value - The list of OS upgrades performed on the virtual machine scale set.
- Value *[]UpgradeOperationHistoricalStatusInfo `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of OS Upgrade History. Call ListNext() with this to fetch the next page of history of upgrades.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetListOSUpgradeHistoryIterator provides access to a complete listing of
-// UpgradeOperationHistoricalStatusInfo values.
-type VirtualMachineScaleSetListOSUpgradeHistoryIterator struct {
- i int
- page VirtualMachineScaleSetListOSUpgradeHistoryPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetListOSUpgradeHistoryIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListOSUpgradeHistoryIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetListOSUpgradeHistoryIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Response() VirtualMachineScaleSetListOSUpgradeHistory {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Value() UpgradeOperationHistoricalStatusInfo {
- if !iter.page.NotDone() {
- return UpgradeOperationHistoricalStatusInfo{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListOSUpgradeHistoryIterator type.
-func NewVirtualMachineScaleSetListOSUpgradeHistoryIterator(page VirtualMachineScaleSetListOSUpgradeHistoryPage) VirtualMachineScaleSetListOSUpgradeHistoryIterator {
- return VirtualMachineScaleSetListOSUpgradeHistoryIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) IsEmpty() bool {
- return vmsslouh.Value == nil || len(*vmsslouh.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) hasNextLink() bool {
- return vmsslouh.NextLink != nil && len(*vmsslouh.NextLink) != 0
-}
-
-// virtualMachineScaleSetListOSUpgradeHistoryPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) virtualMachineScaleSetListOSUpgradeHistoryPreparer(ctx context.Context) (*http.Request, error) {
- if !vmsslouh.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmsslouh.NextLink)))
-}
-
-// VirtualMachineScaleSetListOSUpgradeHistoryPage contains a page of UpgradeOperationHistoricalStatusInfo
-// values.
-type VirtualMachineScaleSetListOSUpgradeHistoryPage struct {
- fn func(context.Context, VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error)
- vmsslouh VirtualMachineScaleSetListOSUpgradeHistory
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetListOSUpgradeHistoryPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListOSUpgradeHistoryPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmsslouh)
- if err != nil {
- return err
- }
- page.vmsslouh = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetListOSUpgradeHistoryPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) NotDone() bool {
- return !page.vmsslouh.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Response() VirtualMachineScaleSetListOSUpgradeHistory {
- return page.vmsslouh
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Values() []UpgradeOperationHistoricalStatusInfo {
- if page.vmsslouh.IsEmpty() {
- return nil
- }
- return *page.vmsslouh.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListOSUpgradeHistoryPage type.
-func NewVirtualMachineScaleSetListOSUpgradeHistoryPage(cur VirtualMachineScaleSetListOSUpgradeHistory, getNextPage func(context.Context, VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error)) VirtualMachineScaleSetListOSUpgradeHistoryPage {
- return VirtualMachineScaleSetListOSUpgradeHistoryPage{
- fn: getNextPage,
- vmsslouh: cur,
- }
-}
-
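For context, the block above removes the track-1 pager plumbing (`NotDone`/`Values`/`NextWithContext`) that callers used to enumerate paged list results. Below is a minimal consumption sketch, assuming the vendored compute client at an illustrative import path and API version; authentication and client construction are omitted and are not part of this change:

```go
package main

import (
	"context"
	"fmt"

	// Illustrative import path/API version for the removed track-1 SDK.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// listScaleSets walks every page of a List call using the generated pager:
// NotDone reports whether values remain, Values returns the current page,
// and NextWithContext follows NextLink to fetch the next page.
func listScaleSets(ctx context.Context, client compute.VirtualMachineScaleSetsClient, resourceGroup string) error {
	page, err := client.List(ctx, resourceGroup)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, vmss := range page.Values() {
			if vmss.Name != nil {
				fmt.Println(*vmss.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```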
-// VirtualMachineScaleSetListResult the List Virtual Machine operation response.
-type VirtualMachineScaleSetListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machine scale sets.
- Value *[]VirtualMachineScaleSet `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of VMSS.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetListResultIterator provides access to a complete listing of VirtualMachineScaleSet
-// values.
-type VirtualMachineScaleSetListResultIterator struct {
- i int
- page VirtualMachineScaleSetListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetListResultIterator) Response() VirtualMachineScaleSetListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetListResultIterator) Value() VirtualMachineScaleSet {
- if !iter.page.NotDone() {
- return VirtualMachineScaleSet{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListResultIterator type.
-func NewVirtualMachineScaleSetListResultIterator(page VirtualMachineScaleSetListResultPage) VirtualMachineScaleSetListResultIterator {
- return VirtualMachineScaleSetListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmsslr VirtualMachineScaleSetListResult) IsEmpty() bool {
- return vmsslr.Value == nil || len(*vmsslr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmsslr VirtualMachineScaleSetListResult) hasNextLink() bool {
- return vmsslr.NextLink != nil && len(*vmsslr.NextLink) != 0
-}
-
-// virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmsslr VirtualMachineScaleSetListResult) virtualMachineScaleSetListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmsslr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmsslr.NextLink)))
-}
-
-// VirtualMachineScaleSetListResultPage contains a page of VirtualMachineScaleSet values.
-type VirtualMachineScaleSetListResultPage struct {
- fn func(context.Context, VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error)
- vmsslr VirtualMachineScaleSetListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmsslr)
- if err != nil {
- return err
- }
- page.vmsslr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetListResultPage) NotDone() bool {
- return !page.vmsslr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetListResultPage) Response() VirtualMachineScaleSetListResult {
- return page.vmsslr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetListResultPage) Values() []VirtualMachineScaleSet {
- if page.vmsslr.IsEmpty() {
- return nil
- }
- return *page.vmsslr.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListResultPage type.
-func NewVirtualMachineScaleSetListResultPage(cur VirtualMachineScaleSetListResult, getNextPage func(context.Context, VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error)) VirtualMachineScaleSetListResultPage {
- return VirtualMachineScaleSetListResultPage{
- fn: getNextPage,
- vmsslr: cur,
- }
-}
-
-// VirtualMachineScaleSetListSkusResult the Virtual Machine Scale Set List Skus operation response.
-type VirtualMachineScaleSetListSkusResult struct {
- autorest.Response `json:"-"`
- // Value - The list of skus available for the virtual machine scale set.
- Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Virtual Machine Scale Set Skus. Call ListNext() with this to fetch the next page of VMSS Skus.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetListSkusResultIterator provides access to a complete listing of
-// VirtualMachineScaleSetSku values.
-type VirtualMachineScaleSetListSkusResultIterator struct {
- i int
- page VirtualMachineScaleSetListSkusResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetListSkusResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListSkusResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetListSkusResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetListSkusResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetListSkusResultIterator) Response() VirtualMachineScaleSetListSkusResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetListSkusResultIterator) Value() VirtualMachineScaleSetSku {
- if !iter.page.NotDone() {
- return VirtualMachineScaleSetSku{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListSkusResultIterator type.
-func NewVirtualMachineScaleSetListSkusResultIterator(page VirtualMachineScaleSetListSkusResultPage) VirtualMachineScaleSetListSkusResultIterator {
- return VirtualMachineScaleSetListSkusResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmsslsr VirtualMachineScaleSetListSkusResult) IsEmpty() bool {
- return vmsslsr.Value == nil || len(*vmsslsr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmsslsr VirtualMachineScaleSetListSkusResult) hasNextLink() bool {
- return vmsslsr.NextLink != nil && len(*vmsslsr.NextLink) != 0
-}
-
-// virtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmsslsr VirtualMachineScaleSetListSkusResult) virtualMachineScaleSetListSkusResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmsslsr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmsslsr.NextLink)))
-}
-
-// VirtualMachineScaleSetListSkusResultPage contains a page of VirtualMachineScaleSetSku values.
-type VirtualMachineScaleSetListSkusResultPage struct {
- fn func(context.Context, VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error)
- vmsslsr VirtualMachineScaleSetListSkusResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetListSkusResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListSkusResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmsslsr)
- if err != nil {
- return err
- }
- page.vmsslsr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetListSkusResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetListSkusResultPage) NotDone() bool {
- return !page.vmsslsr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetListSkusResultPage) Response() VirtualMachineScaleSetListSkusResult {
- return page.vmsslsr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetListSkusResultPage) Values() []VirtualMachineScaleSetSku {
- if page.vmsslsr.IsEmpty() {
- return nil
- }
- return *page.vmsslsr.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListSkusResultPage type.
-func NewVirtualMachineScaleSetListSkusResultPage(cur VirtualMachineScaleSetListSkusResult, getNextPage func(context.Context, VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error)) VirtualMachineScaleSetListSkusResultPage {
- return VirtualMachineScaleSetListSkusResultPage{
- fn: getNextPage,
- vmsslsr: cur,
- }
-}
-
-// VirtualMachineScaleSetListWithLinkResult the List Virtual Machine operation response.
-type VirtualMachineScaleSetListWithLinkResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machine scale sets.
- Value *[]VirtualMachineScaleSet `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of Virtual Machine Scale Sets.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetListWithLinkResultIterator provides access to a complete listing of
-// VirtualMachineScaleSet values.
-type VirtualMachineScaleSetListWithLinkResultIterator struct {
- i int
- page VirtualMachineScaleSetListWithLinkResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetListWithLinkResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListWithLinkResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetListWithLinkResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetListWithLinkResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetListWithLinkResultIterator) Response() VirtualMachineScaleSetListWithLinkResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetListWithLinkResultIterator) Value() VirtualMachineScaleSet {
- if !iter.page.NotDone() {
- return VirtualMachineScaleSet{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListWithLinkResultIterator type.
-func NewVirtualMachineScaleSetListWithLinkResultIterator(page VirtualMachineScaleSetListWithLinkResultPage) VirtualMachineScaleSetListWithLinkResultIterator {
- return VirtualMachineScaleSetListWithLinkResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) IsEmpty() bool {
- return vmsslwlr.Value == nil || len(*vmsslwlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) hasNextLink() bool {
- return vmsslwlr.NextLink != nil && len(*vmsslwlr.NextLink) != 0
-}
-
-// virtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) virtualMachineScaleSetListWithLinkResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmsslwlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmsslwlr.NextLink)))
-}
-
-// VirtualMachineScaleSetListWithLinkResultPage contains a page of VirtualMachineScaleSet values.
-type VirtualMachineScaleSetListWithLinkResultPage struct {
- fn func(context.Context, VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error)
- vmsslwlr VirtualMachineScaleSetListWithLinkResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetListWithLinkResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetListWithLinkResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmsslwlr)
- if err != nil {
- return err
- }
- page.vmsslwlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetListWithLinkResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetListWithLinkResultPage) NotDone() bool {
- return !page.vmsslwlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetListWithLinkResultPage) Response() VirtualMachineScaleSetListWithLinkResult {
- return page.vmsslwlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetListWithLinkResultPage) Values() []VirtualMachineScaleSet {
- if page.vmsslwlr.IsEmpty() {
- return nil
- }
- return *page.vmsslwlr.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetListWithLinkResultPage type.
-func NewVirtualMachineScaleSetListWithLinkResultPage(cur VirtualMachineScaleSetListWithLinkResult, getNextPage func(context.Context, VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error)) VirtualMachineScaleSetListWithLinkResultPage {
- return VirtualMachineScaleSetListWithLinkResultPage{
- fn: getNextPage,
- vmsslwlr: cur,
- }
-}
-
-// VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk.
-type VirtualMachineScaleSetManagedDiskParameters struct {
- // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS', 'StorageAccountTypesPremiumZRS', 'StorageAccountTypesStandardSSDZRS'
- StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
- // DiskEncryptionSet - Specifies the customer managed disk encryption set resource id for the managed disk.
- DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
-}
-
-// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's
-// network configurations.
-type VirtualMachineScaleSetNetworkConfiguration struct {
- // Name - The network configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetNetworkConfiguration.
-func (vmssnc VirtualMachineScaleSetNetworkConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssnc.Name != nil {
- objectMap["name"] = vmssnc.Name
- }
- if vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties != nil {
- objectMap["properties"] = vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties
- }
- if vmssnc.ID != nil {
- objectMap["id"] = vmssnc.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetNetworkConfiguration struct.
-func (vmssnc *VirtualMachineScaleSetNetworkConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssnc.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetNetworkConfigurationProperties VirtualMachineScaleSetNetworkConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetNetworkConfigurationProperties)
- if err != nil {
- return err
- }
- vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties = &virtualMachineScaleSetNetworkConfigurationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssnc.ID = &ID
- }
- }
- }
-
- return nil
-}
-
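The custom MarshalJSON/UnmarshalJSON pair above exists because the embedded `*VirtualMachineScaleSetNetworkConfigurationProperties` pointer is flattened in the Go struct but nested under a `"properties"` object on the wire. A hedged round-trip sketch follows; the import paths and field values are illustrative assumptions, not part of this change:

```go
package main

import (
	"encoding/json"
	"fmt"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// The embedded *Properties struct is re-nested under "properties" by the
	// custom marshaller, matching the ARM wire format.
	nic := compute.VirtualMachineScaleSetNetworkConfiguration{
		Name: to.StringPtr("nic-0"),
		VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{
			Primary: to.BoolPtr(true),
		},
	}
	b, err := json.Marshal(nic)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"nic-0","properties":{"primary":true}}

	// UnmarshalJSON reverses the flattening, decoding the nested "properties"
	// object back into the embedded struct.
	var decoded compute.VirtualMachineScaleSetNetworkConfiguration
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(*decoded.Primary) // true
}
```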
-// VirtualMachineScaleSetNetworkConfigurationDNSSettings describes a virtual machines scale sets network
-// configuration's DNS settings.
-type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct {
- // DNSServers - List of DNS servers IP addresses
- DNSServers *[]string `json:"dnsServers,omitempty"`
-}
-
-// VirtualMachineScaleSetNetworkConfigurationProperties describes a virtual machine scale set network
-// profile's IP configuration.
-type VirtualMachineScaleSetNetworkConfigurationProperties struct {
- // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface.
- Primary *bool `json:"primary,omitempty"`
- // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled.
- EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
- // EnableFpga - Specifies whether the network interface is FPGA networking-enabled.
- EnableFpga *bool `json:"enableFpga,omitempty"`
- // NetworkSecurityGroup - The network security group.
- NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
- // DNSSettings - The dns settings to be applied on the network interfaces.
- DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"`
- // IPConfigurations - Specifies the IP configurations of the network interface.
- IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"`
- // EnableIPForwarding - Whether IP forwarding enabled on this NIC.
- EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
-}
-
-// VirtualMachineScaleSetNetworkProfile describes a virtual machine scale set network profile.
-type VirtualMachineScaleSetNetworkProfile struct {
- // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
- HealthProbe *APIEntityReference `json:"healthProbe,omitempty"`
- // NetworkInterfaceConfigurations - The list of network configurations.
- NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
- NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
-}
-
-// VirtualMachineScaleSetOSDisk describes a virtual machine scale set operating system disk.
-type VirtualMachineScaleSetOSDisk struct {
- // Name - The disk name.
- Name *string `json:"name,omitempty"`
- // Caching - Specifies the caching requirements. Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // CreateOption - Specifies how the virtual machines in the scale set should be created. The only allowed value is: **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
- CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
- // DiffDiskSettings - Specifies the ephemeral disk Settings for the operating system disk used by the virtual machine scale set.
- DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"`
- // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD. Possible values are: **Windows**, **Linux**. Possible values include: 'OperatingSystemTypesWindows', 'OperatingSystemTypesLinux'
- OsType OperatingSystemTypes `json:"osType,omitempty"`
- // Image - Specifies information about the unmanaged user image to base the scale set on.
- Image *VirtualHardDisk `json:"image,omitempty"`
- // VhdContainers - Specifies the container urls that are used to store operating system disks for the scale set.
- VhdContainers *[]string `json:"vhdContainers,omitempty"`
- // ManagedDisk - The managed disk parameters.
- ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
-}
-
-// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS profile.
-type VirtualMachineScaleSetOSProfile struct {
- // ComputerNamePrefix - Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long.
- ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"`
- // AdminUsername - Specifies the name of the administrator account. **Windows-only restriction:** Cannot end in "." **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". **Minimum-length (Linux):** 1 character. **Max-length (Linux):** 64 characters. **Max-length (Windows):** 20 characters.
- AdminUsername *string `json:"adminUsername,omitempty"`
- // AdminPassword - Specifies the password of the administrator account. **Minimum-length (Windows):** 8 characters. **Minimum-length (Linux):** 6 characters. **Max-length (Windows):** 123 characters. **Max-length (Linux):** 72 characters. **Complexity requirements:** 3 out of 4 of the following conditions must be fulfilled: has lower characters, has upper characters, has a digit, has a special character (Regex match [\W_]). **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp). For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection).
- AdminPassword *string `json:"adminPassword,omitempty"`
- // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init).
- CustomData *string `json:"customData,omitempty"`
- // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine.
- WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
- // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine. For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros).
- LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
- // Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
- Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
-}
-
-// VirtualMachineScaleSetProperties describes the properties of a Virtual Machine Scale Set.
-type VirtualMachineScaleSetProperties struct {
- // UpgradePolicy - The upgrade policy.
- UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"`
- // AutomaticRepairsPolicy - Policy for automatic repairs.
- AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"`
- // VirtualMachineProfile - The virtual machine profile.
- VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned.
- Overprovision *bool `json:"overprovision,omitempty"`
- // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
- DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"`
- // UniqueID - READ-ONLY; Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
- UniqueID *string `json:"uniqueId,omitempty"`
- // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
- SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
- // ZoneBalance - Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage. zoneBalance property can only be set if the zones property of the scale set contains more than one zone. If there are no zones or only one zone specified, then zoneBalance property should not be set.
- ZoneBalance *bool `json:"zoneBalance,omitempty"`
- // PlatformFaultDomainCount - Fault Domain count for each placement group.
- PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"`
- // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. Minimum api-version: 2018-04-01.
- ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
- // HostGroup - Specifies information about the dedicated host group that the virtual machine scale set resides in. Minimum api-version: 2020-06-01.
- HostGroup *SubResource `json:"hostGroup,omitempty"`
- // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
- AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
- // ScaleInPolicy - Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set.
- ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"`
- // OrchestrationMode - Specifies the orchestration mode for the virtual machine scale set. Possible values include: 'OrchestrationModeUniform', 'OrchestrationModeFlexible'
- OrchestrationMode OrchestrationMode `json:"orchestrationMode,omitempty"`
- // SpotRestorePolicy - Specifies the Spot Restore properties for the virtual machine scale set.
- SpotRestorePolicy *SpotRestorePolicy `json:"spotRestorePolicy,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetProperties.
-func (vmssp VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssp.UpgradePolicy != nil {
- objectMap["upgradePolicy"] = vmssp.UpgradePolicy
- }
- if vmssp.AutomaticRepairsPolicy != nil {
- objectMap["automaticRepairsPolicy"] = vmssp.AutomaticRepairsPolicy
- }
- if vmssp.VirtualMachineProfile != nil {
- objectMap["virtualMachineProfile"] = vmssp.VirtualMachineProfile
- }
- if vmssp.Overprovision != nil {
- objectMap["overprovision"] = vmssp.Overprovision
- }
- if vmssp.DoNotRunExtensionsOnOverprovisionedVMs != nil {
- objectMap["doNotRunExtensionsOnOverprovisionedVMs"] = vmssp.DoNotRunExtensionsOnOverprovisionedVMs
- }
- if vmssp.SinglePlacementGroup != nil {
- objectMap["singlePlacementGroup"] = vmssp.SinglePlacementGroup
- }
- if vmssp.ZoneBalance != nil {
- objectMap["zoneBalance"] = vmssp.ZoneBalance
- }
- if vmssp.PlatformFaultDomainCount != nil {
- objectMap["platformFaultDomainCount"] = vmssp.PlatformFaultDomainCount
- }
- if vmssp.ProximityPlacementGroup != nil {
- objectMap["proximityPlacementGroup"] = vmssp.ProximityPlacementGroup
- }
- if vmssp.HostGroup != nil {
- objectMap["hostGroup"] = vmssp.HostGroup
- }
- if vmssp.AdditionalCapabilities != nil {
- objectMap["additionalCapabilities"] = vmssp.AdditionalCapabilities
- }
- if vmssp.ScaleInPolicy != nil {
- objectMap["scaleInPolicy"] = vmssp.ScaleInPolicy
- }
- if vmssp.OrchestrationMode != "" {
- objectMap["orchestrationMode"] = vmssp.OrchestrationMode
- }
- if vmssp.SpotRestorePolicy != nil {
- objectMap["spotRestorePolicy"] = vmssp.SpotRestorePolicy
- }
- return json.Marshal(objectMap)
-}
-
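Worth noting: the marshaller above deliberately omits the READ-ONLY fields (`ProvisioningState`, `UniqueID`), so server-owned values are never echoed back in PUT/PATCH bodies. A small sketch under the same illustrative import-path assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	props := compute.VirtualMachineScaleSetProperties{
		Overprovision:     to.BoolPtr(true),
		ProvisioningState: to.StringPtr("Succeeded"), // READ-ONLY: skipped by MarshalJSON
	}
	b, err := json.Marshal(props)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"overprovision":true}
}
```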
-// VirtualMachineScaleSetPublicIPAddressConfiguration describes a virtual machines scale set IP
-// Configuration's PublicIPAddress configuration
-type VirtualMachineScaleSetPublicIPAddressConfiguration struct {
- // Name - The publicIP address configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"`
- Sku *PublicIPAddressSku `json:"sku,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetPublicIPAddressConfiguration.
-func (vmsspiac VirtualMachineScaleSetPublicIPAddressConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmsspiac.Name != nil {
- objectMap["name"] = vmsspiac.Name
- }
- if vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties != nil {
- objectMap["properties"] = vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties
- }
- if vmsspiac.Sku != nil {
- objectMap["sku"] = vmsspiac.Sku
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetPublicIPAddressConfiguration struct.
-func (vmsspiac *VirtualMachineScaleSetPublicIPAddressConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmsspiac.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetPublicIPAddressConfigurationProperties VirtualMachineScaleSetPublicIPAddressConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetPublicIPAddressConfigurationProperties)
- if err != nil {
- return err
- }
- vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties = &virtualMachineScaleSetPublicIPAddressConfigurationProperties
- }
- case "sku":
- if v != nil {
- var sku PublicIPAddressSku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- vmsspiac.Sku = &sku
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings describes a virtual machines scale sets
-// network configuration's DNS settings.
-type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct {
- // DomainNameLabel - The Domain name label.The concatenation of the domain name label and vm index will be the domain name labels of the PublicIPAddress resources that will be created
- DomainNameLabel *string `json:"domainNameLabel,omitempty"`
-}
-
-// VirtualMachineScaleSetPublicIPAddressConfigurationProperties describes a virtual machines scale set IP
-// Configuration's PublicIPAddress configuration
-type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct {
- // IdleTimeoutInMinutes - The idle timeout of the public IP address.
- IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
- // DNSSettings - The dns settings to be applied on the publicIP addresses .
- DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"`
- // IPTags - The list of IP tags associated with the public IP address.
- IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"`
- // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses.
- PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
- // PublicIPAddressVersion - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
- PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"`
- // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
-}
-
-// VirtualMachineScaleSetReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters.
-type VirtualMachineScaleSetReimageParameters struct {
- // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set.
- InstanceIds *[]string `json:"instanceIds,omitempty"`
- // TempDisk - Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
- TempDisk *bool `json:"tempDisk,omitempty"`
-}
-
-// VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type VirtualMachineScaleSetRollingUpgradesCancelFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetRollingUpgradesCancelFuture.Result.
-func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
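For context, these generated `*Future` types implement the track-1 long-running-operation pattern: the client method returns a future, the caller blocks on `WaitForCompletionRef`, and the `Result` closure decodes the terminal response. A hedged usage sketch with illustrative names and import path (client construction and auth omitted):

```go
package main

import (
	"context"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
)

// cancelRollingUpgrade starts the Cancel operation, waits for the long-running
// operation to reach a terminal state, then decodes the final response via the
// future's Result closure.
func cancelRollingUpgrade(ctx context.Context, client compute.VirtualMachineScaleSetRollingUpgradesClient, resourceGroup, vmssName string) error {
	future, err := client.Cancel(ctx, resourceGroup, vmssName)
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client)
	return err
}
```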
-// VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and
-// retrieving the results of a long-running operation.
-type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture.Result.
-func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving
-// the results of a long-running operation.
-type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetRollingUpgradesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture.Result.
-func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineScaleSetsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmss.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent {
- vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsDeallocateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsDeallocateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsDeallocateFuture.Result.
-func (future *VirtualMachineScaleSetsDeallocateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsDeleteFuture.Result.
-func (future *VirtualMachineScaleSetsDeleteFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineScaleSetsDeleteInstancesFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsDeleteInstancesFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsDeleteInstancesFuture.Result.
-func (future *VirtualMachineScaleSetsDeleteInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetSku describes an available virtual machine scale set sku.
-type VirtualMachineScaleSetSku struct {
- // ResourceType - READ-ONLY; The type of resource the sku applies to.
- ResourceType *string `json:"resourceType,omitempty"`
- // Sku - READ-ONLY; The Sku.
- Sku *Sku `json:"sku,omitempty"`
- // Capacity - READ-ONLY; Specifies the number of virtual machines in the scale set.
- Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetSku.
-func (vmsss VirtualMachineScaleSetSku) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku.
-type VirtualMachineScaleSetSkuCapacity struct {
- // Minimum - READ-ONLY; The minimum capacity.
- Minimum *int64 `json:"minimum,omitempty"`
- // Maximum - READ-ONLY; The maximum capacity that can be set.
- Maximum *int64 `json:"maximum,omitempty"`
- // DefaultCapacity - READ-ONLY; The default capacity.
- DefaultCapacity *int64 `json:"defaultCapacity,omitempty"`
- // ScaleType - READ-ONLY; The scale type applicable to the sku. Possible values include: 'VirtualMachineScaleSetSkuScaleTypeAutomatic', 'VirtualMachineScaleSetSkuScaleTypeNone'
- ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetSkuCapacity.
-func (vmsssc VirtualMachineScaleSetSkuCapacity) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineScaleSetsPerformMaintenanceFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsPerformMaintenanceFuture.Result.
-func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsPowerOffFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsPowerOffFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsPowerOffFuture.Result.
-func (future *VirtualMachineScaleSetsPowerOffFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsRedeployFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsRedeployFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsRedeployFuture.Result.
-func (future *VirtualMachineScaleSetsRedeployFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsReimageAllFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsReimageAllFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsReimageAllFuture.Result.
-func (future *VirtualMachineScaleSetsReimageAllFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsReimageFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsReimageFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsReimageFuture.Result.
-func (future *VirtualMachineScaleSetsReimageFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsRestartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsRestartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsRestartFuture.Result.
-func (future *VirtualMachineScaleSetsRestartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsSetOrchestrationServiceStateFuture an abstraction for monitoring and retrieving
-// the results of a long-running operation.
-type VirtualMachineScaleSetsSetOrchestrationServiceStateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsSetOrchestrationServiceStateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsSetOrchestrationServiceStateFuture.Result.
-func (future *VirtualMachineScaleSetsSetOrchestrationServiceStateFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsSetOrchestrationServiceStateFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsStartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsStartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsStartFuture.Result.
-func (future *VirtualMachineScaleSetsStartFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile.
-type VirtualMachineScaleSetStorageProfile struct {
- // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations.
- ImageReference *ImageReference `json:"imageReference,omitempty"`
- // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"`
- // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview).
- DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"`
-}
-
-// VirtualMachineScaleSetsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (VirtualMachineScaleSet, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsUpdateFuture.Result.
-func (future *VirtualMachineScaleSetsUpdateFuture) result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmss.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent {
- vmss, err = client.UpdateResponder(vmss.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of
-// a long-running operation.
-type VirtualMachineScaleSetsUpdateInstancesFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetsUpdateInstancesFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetsUpdateInstancesFuture.Result.
-func (future *VirtualMachineScaleSetsUpdateInstancesFuture) result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set.
-type VirtualMachineScaleSetUpdate struct {
- // Sku - The virtual machine scale set sku.
- Sku *Sku `json:"sku,omitempty"`
- // Plan - The purchase plan when deploying a virtual machine scale set from VM Marketplace images.
- Plan *Plan `json:"plan,omitempty"`
- *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"`
- // Identity - The identity of the virtual machine scale set, if configured.
- Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdate.
-func (vmssu VirtualMachineScaleSetUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssu.Sku != nil {
- objectMap["sku"] = vmssu.Sku
- }
- if vmssu.Plan != nil {
- objectMap["plan"] = vmssu.Plan
- }
- if vmssu.VirtualMachineScaleSetUpdateProperties != nil {
- objectMap["properties"] = vmssu.VirtualMachineScaleSetUpdateProperties
- }
- if vmssu.Identity != nil {
- objectMap["identity"] = vmssu.Identity
- }
- if vmssu.Tags != nil {
- objectMap["tags"] = vmssu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdate struct.
-func (vmssu *VirtualMachineScaleSetUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- vmssu.Sku = &sku
- }
- case "plan":
- if v != nil {
- var plan Plan
- err = json.Unmarshal(*v, &plan)
- if err != nil {
- return err
- }
- vmssu.Plan = &plan
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetUpdateProperties VirtualMachineScaleSetUpdateProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateProperties)
- if err != nil {
- return err
- }
- vmssu.VirtualMachineScaleSetUpdateProperties = &virtualMachineScaleSetUpdateProperties
- }
- case "identity":
- if v != nil {
- var identity VirtualMachineScaleSetIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- vmssu.Identity = &identity
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmssu.Tags = tags
- }
- }
- }
-
- return nil
-}
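The custom MarshalJSON/UnmarshalJSON pair above exists because VirtualMachineScaleSetUpdate embeds *VirtualMachineScaleSetUpdateProperties anonymously: on the wire those fields live under a nested "properties" key, while in Go they are promoted onto the outer struct. A small round-trip sketch of that flattening, assuming the usual compute package alias for this vendored SDK and the go-autorest "to" pointer helpers:

// Sketch only: package aliases and field values are illustrative.
upd := compute.VirtualMachineScaleSetUpdate{
	Sku: &compute.Sku{Name: to.StringPtr("Standard_DS2_v2"), Capacity: to.Int64Ptr(3)},
	VirtualMachineScaleSetUpdateProperties: &compute.VirtualMachineScaleSetUpdateProperties{
		Overprovision: to.BoolPtr(false),
	},
}
body, err := json.Marshal(upd) // yields {"sku":{...},"properties":{"overprovision":false}}
if err != nil {
	return err
}
var decoded compute.VirtualMachineScaleSetUpdate
if err := json.Unmarshal(body, &decoded); err != nil {
	return err
}
// The embedded properties struct is re-attached, so decoded.Overprovision is usable again.
_ = decoded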
-
-// VirtualMachineScaleSetUpdateIPConfiguration describes a virtual machine scale set network profile's IP
-// configuration. NOTE: The subnet of a scale set may be modified as long as the original subnet and the
-// new subnet are in the same virtual network
-type VirtualMachineScaleSetUpdateIPConfiguration struct {
- // Name - The IP configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateIPConfiguration.
-func (vmssuic VirtualMachineScaleSetUpdateIPConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssuic.Name != nil {
- objectMap["name"] = vmssuic.Name
- }
- if vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties != nil {
- objectMap["properties"] = vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties
- }
- if vmssuic.ID != nil {
- objectMap["id"] = vmssuic.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateIPConfiguration struct.
-func (vmssuic *VirtualMachineScaleSetUpdateIPConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssuic.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetUpdateIPConfigurationProperties VirtualMachineScaleSetUpdateIPConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateIPConfigurationProperties)
- if err != nil {
- return err
- }
- vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties = &virtualMachineScaleSetUpdateIPConfigurationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssuic.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetUpdateIPConfigurationProperties describes a virtual machine scale set network
-// profile's IP configuration properties.
-type VirtualMachineScaleSetUpdateIPConfigurationProperties struct {
- // Subnet - The subnet.
- Subnet *APIEntityReference `json:"subnet,omitempty"`
- // Primary - Specifies the primary IP Configuration in case the network interface has more than one IP Configuration.
- Primary *bool `json:"primary,omitempty"`
- // PublicIPAddressConfiguration - The publicIPAddressConfiguration.
- PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
- // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPVersionIPv4', 'IPVersionIPv6'
- PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"`
- // ApplicationGatewayBackendAddressPools - The application gateway backend address pools.
- ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
- // ApplicationSecurityGroups - Specifies an array of references to application security group.
- ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"`
- // LoadBalancerBackendAddressPools - The load balancer backend address pools.
- LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"`
- // LoadBalancerInboundNatPools - The load balancer inbound nat pools.
- LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateNetworkConfiguration describes a virtual machine scale set network profile's
-// network configurations.
-type VirtualMachineScaleSetUpdateNetworkConfiguration struct {
- // Name - The network configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"`
- // ID - Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateNetworkConfiguration.
-func (vmssunc VirtualMachineScaleSetUpdateNetworkConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssunc.Name != nil {
- objectMap["name"] = vmssunc.Name
- }
- if vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties != nil {
- objectMap["properties"] = vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties
- }
- if vmssunc.ID != nil {
- objectMap["id"] = vmssunc.ID
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateNetworkConfiguration struct.
-func (vmssunc *VirtualMachineScaleSetUpdateNetworkConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssunc.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetUpdateNetworkConfigurationProperties VirtualMachineScaleSetUpdateNetworkConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateNetworkConfigurationProperties)
- if err != nil {
- return err
- }
- vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties = &virtualMachineScaleSetUpdateNetworkConfigurationProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssunc.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetUpdateNetworkConfigurationProperties describes a virtual machine scale set
-// updatable network profile's IP configuration.Use this object for updating network profile's IP
-// Configuration.
-type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct {
- // Primary - Whether this is a primary NIC on a virtual machine.
- Primary *bool `json:"primary,omitempty"`
- // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled.
- EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
- // EnableFpga - Specifies whether the network interface is FPGA networking-enabled.
- EnableFpga *bool `json:"enableFpga,omitempty"`
- // NetworkSecurityGroup - The network security group.
- NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
- // DNSSettings - The dns settings to be applied on the network interfaces.
- DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"`
- // IPConfigurations - The virtual machine scale set IP Configuration.
- IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"`
- // EnableIPForwarding - Whether IP forwarding enabled on this NIC.
- EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
- // DeleteOption - Specify what happens to the network interface when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile.
-type VirtualMachineScaleSetUpdateNetworkProfile struct {
- // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
- HealthProbe *APIEntityReference `json:"healthProbe,omitempty"`
- // NetworkInterfaceConfigurations - The list of network configurations.
- NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
- // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Possible values include: 'NetworkAPIVersionTwoZeroTwoZeroHyphenMinusOneOneHyphenMinusZeroOne'
- NetworkAPIVersion NetworkAPIVersion `json:"networkApiVersion,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update
-// Object. This should be used for Updating VMSS OS Disk.
-type VirtualMachineScaleSetUpdateOSDisk struct {
- // Caching - The caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB.
- DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
- // Image - The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before using it to attach to the Virtual Machine. If SourceImage is provided, the destination VirtualHardDisk should not exist.
- Image *VirtualHardDisk `json:"image,omitempty"`
- // VhdContainers - The list of virtual hard disk container uris.
- VhdContainers *[]string `json:"vhdContainers,omitempty"`
- // ManagedDisk - The managed disk parameters.
- ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateOSProfile describes a virtual machine scale set OS profile.
-type VirtualMachineScaleSetUpdateOSProfile struct {
- // CustomData - A base-64 encoded string of custom data.
- CustomData *string `json:"customData,omitempty"`
- // WindowsConfiguration - The Windows Configuration of the OS profile.
- WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
- // LinuxConfiguration - The Linux Configuration of the OS profile.
- LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
- // Secrets - The List of certificates for addition to the VM.
- Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateProperties describes the properties of a Virtual Machine Scale Set.
-type VirtualMachineScaleSetUpdateProperties struct {
- // UpgradePolicy - The upgrade policy.
- UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"`
- // AutomaticRepairsPolicy - Policy for automatic repairs.
- AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"`
- // VirtualMachineProfile - The virtual machine profile.
- VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"`
- // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned.
- Overprovision *bool `json:"overprovision,omitempty"`
- // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
- DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"`
- // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
- SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
- // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
- AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
- // ScaleInPolicy - Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set.
- ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"`
- // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. Minimum api-version: 2018-04-01.
- ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP
-// Configuration's PublicIPAddress configuration
-type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct {
- // Name - The publicIP address configuration name.
- Name *string `json:"name,omitempty"`
- *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration.
-func (vmssupiac VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssupiac.Name != nil {
- objectMap["name"] = vmssupiac.Name
- }
- if vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties != nil {
- objectMap["properties"] = vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct.
-func (vmssupiac *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssupiac.Name = &name
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties)
- if err != nil {
- return err
- }
- vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties = &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties describes a virtual machines scale
-// set IP Configuration's PublicIPAddress configuration
-type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct {
- // IdleTimeoutInMinutes - The idle timeout of the public IP address.
- IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
- // DNSSettings - The dns settings to be applied on the publicIP addresses .
- DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"`
- // DeleteOption - Specify what happens to the public IP when the VM is deleted. Possible values include: 'DeleteOptionsDelete', 'DeleteOptionsDetach'
- DeleteOption DeleteOptions `json:"deleteOption,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateStorageProfile describes a virtual machine scale set storage profile.
-type VirtualMachineScaleSetUpdateStorageProfile struct {
- // ImageReference - The image reference.
- ImageReference *ImageReference `json:"imageReference,omitempty"`
- // OsDisk - The OS disk.
- OsDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"`
- // DataDisks - The data disks.
- DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"`
-}
-
-// VirtualMachineScaleSetUpdateVMProfile describes a virtual machine scale set virtual machine profile.
-type VirtualMachineScaleSetUpdateVMProfile struct {
- // OsProfile - The virtual machine scale set OS profile.
- OsProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"`
- // StorageProfile - The virtual machine scale set storage profile.
- StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"`
- // NetworkProfile - The virtual machine scale set network profile.
- NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"`
- // SecurityProfile - The virtual machine scale set Security profile
- SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
- // DiagnosticsProfile - The virtual machine scale set diagnostics profile.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // ExtensionProfile - The virtual machine scale set extension profile.
- ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
- // LicenseType - The license type, which is for bring your own license scenario.
- LicenseType *string `json:"licenseType,omitempty"`
- // BillingProfile - Specifies the billing-related details of an Azure Spot VMSS. Minimum api-version: 2019-03-01.
- BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
- // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
- ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
- // UserData - UserData for the VM, which must be base-64 encoded. Customers should not pass any secrets in here. Minimum api-version: 2021-03-01.
- UserData *string `json:"userData,omitempty"`
-}
-
-// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine.
-type VirtualMachineScaleSetVM struct {
- autorest.Response `json:"-"`
- // InstanceID - READ-ONLY; The virtual machine instance ID.
- InstanceID *string `json:"instanceId,omitempty"`
- // Sku - READ-ONLY; The virtual machine SKU.
- Sku *Sku `json:"sku,omitempty"`
- *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"`
- // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
- Plan *Plan `json:"plan,omitempty"`
- // Resources - READ-ONLY; The virtual machine child extension resources.
- Resources *[]VirtualMachineExtension `json:"resources,omitempty"`
- // Zones - READ-ONLY; The virtual machine zones.
- Zones *[]string `json:"zones,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- // Location - Resource location
- Location *string `json:"location,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVM.
-func (vmssv VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssv.VirtualMachineScaleSetVMProperties != nil {
- objectMap["properties"] = vmssv.VirtualMachineScaleSetVMProperties
- }
- if vmssv.Plan != nil {
- objectMap["plan"] = vmssv.Plan
- }
- if vmssv.Location != nil {
- objectMap["location"] = vmssv.Location
- }
- if vmssv.Tags != nil {
- objectMap["tags"] = vmssv.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetVM struct.
-func (vmssv *VirtualMachineScaleSetVM) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "instanceId":
- if v != nil {
- var instanceID string
- err = json.Unmarshal(*v, &instanceID)
- if err != nil {
- return err
- }
- vmssv.InstanceID = &instanceID
- }
- case "sku":
- if v != nil {
- var sku Sku
- err = json.Unmarshal(*v, &sku)
- if err != nil {
- return err
- }
- vmssv.Sku = &sku
- }
- case "properties":
- if v != nil {
- var virtualMachineScaleSetVMProperties VirtualMachineScaleSetVMProperties
- err = json.Unmarshal(*v, &virtualMachineScaleSetVMProperties)
- if err != nil {
- return err
- }
- vmssv.VirtualMachineScaleSetVMProperties = &virtualMachineScaleSetVMProperties
- }
- case "plan":
- if v != nil {
- var plan Plan
- err = json.Unmarshal(*v, &plan)
- if err != nil {
- return err
- }
- vmssv.Plan = &plan
- }
- case "resources":
- if v != nil {
- var resources []VirtualMachineExtension
- err = json.Unmarshal(*v, &resources)
- if err != nil {
- return err
- }
- vmssv.Resources = &resources
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- vmssv.Zones = &zones
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssv.ID = &ID
- }
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssv.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmssv.Type = &typeVar
- }
- case "location":
- if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
- if err != nil {
- return err
- }
- vmssv.Location = &location
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmssv.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetVMExtension describes a VMSS VM Extension.
-type VirtualMachineScaleSetVMExtension struct {
- autorest.Response `json:"-"`
- // Name - READ-ONLY; The name of the extension.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *VirtualMachineExtensionProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMExtension.
-func (vmssve VirtualMachineScaleSetVMExtension) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssve.VirtualMachineExtensionProperties != nil {
- objectMap["properties"] = vmssve.VirtualMachineExtensionProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetVMExtension struct.
-func (vmssve *VirtualMachineScaleSetVMExtension) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssve.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmssve.Type = &typeVar
- }
- case "properties":
- if v != nil {
- var virtualMachineExtensionProperties VirtualMachineExtensionProperties
- err = json.Unmarshal(*v, &virtualMachineExtensionProperties)
- if err != nil {
- return err
- }
- vmssve.VirtualMachineExtensionProperties = &virtualMachineExtensionProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssve.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMExtensionsClient) (VirtualMachineScaleSetVMExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmssve.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmssve.Response.Response, err = future.GetResult(sender); err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent {
- vmssve, err = client.CreateOrUpdateResponder(vmssve.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetVMExtensionsDeleteFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineScaleSetVMExtensionsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMExtensionsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMExtensionsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMExtensionsDeleteFuture.Result.
-func (future *VirtualMachineScaleSetVMExtensionsDeleteFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMExtensionsListResult the List VMSS VM Extension operation response
-type VirtualMachineScaleSetVMExtensionsListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of VMSS VM extensions
- Value *[]VirtualMachineScaleSetVMExtension `json:"value,omitempty"`
-}
-
-// VirtualMachineScaleSetVMExtensionsSummary extensions summary for virtual machines of a virtual machine
-// scale set.
-type VirtualMachineScaleSetVMExtensionsSummary struct {
- // Name - READ-ONLY; The extension name.
- Name *string `json:"name,omitempty"`
- // StatusesSummary - READ-ONLY; The extensions information.
- StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMExtensionsSummary.
-func (vmssves VirtualMachineScaleSetVMExtensionsSummary) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetVMExtensionsUpdateFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineScaleSetVMExtensionsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMExtensionsClient) (VirtualMachineScaleSetVMExtension, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMExtensionsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMExtensionsUpdateFuture.Result.
-func (future *VirtualMachineScaleSetVMExtensionsUpdateFuture) result(client VirtualMachineScaleSetVMExtensionsClient) (vmssve VirtualMachineScaleSetVMExtension, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmssve.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMExtensionsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmssve.Response.Response, err = future.GetResult(sender); err == nil && vmssve.Response.Response.StatusCode != http.StatusNoContent {
- vmssve, err = client.UpdateResponder(vmssve.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsUpdateFuture", "Result", vmssve.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetVMExtensionUpdate describes a VMSS VM Extension.
-type VirtualMachineScaleSetVMExtensionUpdate struct {
- // Name - READ-ONLY; The name of the extension.
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Resource type
- Type *string `json:"type,omitempty"`
- *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"`
- // ID - READ-ONLY; Resource Id
- ID *string `json:"id,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMExtensionUpdate.
-func (vmssveu VirtualMachineScaleSetVMExtensionUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssveu.VirtualMachineExtensionUpdateProperties != nil {
- objectMap["properties"] = vmssveu.VirtualMachineExtensionUpdateProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetVMExtensionUpdate struct.
-func (vmssveu *VirtualMachineScaleSetVMExtensionUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- vmssveu.Name = &name
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- vmssveu.Type = &typeVar
- }
- case "properties":
- if v != nil {
- var virtualMachineExtensionUpdateProperties VirtualMachineExtensionUpdateProperties
- err = json.Unmarshal(*v, &virtualMachineExtensionUpdateProperties)
- if err != nil {
- return err
- }
- vmssveu.VirtualMachineExtensionUpdateProperties = &virtualMachineExtensionUpdateProperties
- }
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- vmssveu.ID = &ID
- }
- }
- }
-
- return nil
-}
-
-// VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale
-// set.
-type VirtualMachineScaleSetVMInstanceIDs struct {
- // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set.
- InstanceIds *[]string `json:"instanceIds,omitempty"`
-}
-
-// VirtualMachineScaleSetVMInstanceRequiredIDs specifies a list of virtual machine instance IDs from the VM
-// scale set.
-type VirtualMachineScaleSetVMInstanceRequiredIDs struct {
- // InstanceIds - The virtual machine scale set instance ids.
- InstanceIds *[]string `json:"instanceIds,omitempty"`
-}
-
-// VirtualMachineScaleSetVMInstanceView the instance view of a virtual machine scale set VM.
-type VirtualMachineScaleSetVMInstanceView struct {
- autorest.Response `json:"-"`
- // PlatformUpdateDomain - The Update Domain count.
- PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"`
- // PlatformFaultDomain - The Fault Domain count.
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // RdpThumbPrint - The Remote desktop certificate thumbprint.
- RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"`
- // VMAgent - The VM Agent running on the virtual machine.
- VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"`
- // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine.
- MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"`
- // Disks - The disks information.
- Disks *[]DiskInstanceView `json:"disks,omitempty"`
- // Extensions - The extensions information.
- Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
- // VMHealth - READ-ONLY; The health status for the VM.
- VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"`
- // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. You can easily view the output of your console log. Azure also enables you to see a screenshot of the VM from the hypervisor.
- BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"`
- // Statuses - The resource status information.
- Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
- // AssignedHost - READ-ONLY; Resource id of the dedicated host, on which the virtual machine is allocated through automatic placement, when the virtual machine is associated with a dedicated host group that has automatic placement enabled. Minimum api-version: 2020-06-01.
- AssignedHost *string `json:"assignedHost,omitempty"`
- // PlacementGroupID - The placement group in which the VM is running. If the VM is deallocated it will not have a placementGroupId.
- PlacementGroupID *string `json:"placementGroupId,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMInstanceView.
-func (vmssviv VirtualMachineScaleSetVMInstanceView) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssviv.PlatformUpdateDomain != nil {
- objectMap["platformUpdateDomain"] = vmssviv.PlatformUpdateDomain
- }
- if vmssviv.PlatformFaultDomain != nil {
- objectMap["platformFaultDomain"] = vmssviv.PlatformFaultDomain
- }
- if vmssviv.RdpThumbPrint != nil {
- objectMap["rdpThumbPrint"] = vmssviv.RdpThumbPrint
- }
- if vmssviv.VMAgent != nil {
- objectMap["vmAgent"] = vmssviv.VMAgent
- }
- if vmssviv.MaintenanceRedeployStatus != nil {
- objectMap["maintenanceRedeployStatus"] = vmssviv.MaintenanceRedeployStatus
- }
- if vmssviv.Disks != nil {
- objectMap["disks"] = vmssviv.Disks
- }
- if vmssviv.Extensions != nil {
- objectMap["extensions"] = vmssviv.Extensions
- }
- if vmssviv.BootDiagnostics != nil {
- objectMap["bootDiagnostics"] = vmssviv.BootDiagnostics
- }
- if vmssviv.Statuses != nil {
- objectMap["statuses"] = vmssviv.Statuses
- }
- if vmssviv.PlacementGroupID != nil {
- objectMap["placementGroupId"] = vmssviv.PlacementGroupID
- }
- return json.Marshal(objectMap)
-}
-
-// VirtualMachineScaleSetVMListResult the List Virtual Machine Scale Set VMs operation response.
-type VirtualMachineScaleSetVMListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machine scale sets VMs.
- Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"`
- // NextLink - The uri to fetch the next page of Virtual Machine Scale Set VMs. Call ListNext() with this to fetch the next page of VMSS VMs
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// VirtualMachineScaleSetVMListResultIterator provides access to a complete listing of
-// VirtualMachineScaleSetVM values.
-type VirtualMachineScaleSetVMListResultIterator struct {
- i int
- page VirtualMachineScaleSetVMListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *VirtualMachineScaleSetVMListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *VirtualMachineScaleSetVMListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter VirtualMachineScaleSetVMListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter VirtualMachineScaleSetVMListResultIterator) Response() VirtualMachineScaleSetVMListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter VirtualMachineScaleSetVMListResultIterator) Value() VirtualMachineScaleSetVM {
- if !iter.page.NotDone() {
- return VirtualMachineScaleSetVM{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the VirtualMachineScaleSetVMListResultIterator type.
-func NewVirtualMachineScaleSetVMListResultIterator(page VirtualMachineScaleSetVMListResultPage) VirtualMachineScaleSetVMListResultIterator {
- return VirtualMachineScaleSetVMListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (vmssvlr VirtualMachineScaleSetVMListResult) IsEmpty() bool {
- return vmssvlr.Value == nil || len(*vmssvlr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (vmssvlr VirtualMachineScaleSetVMListResult) hasNextLink() bool {
- return vmssvlr.NextLink != nil && len(*vmssvlr.NextLink) != 0
-}
-
-// virtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (vmssvlr VirtualMachineScaleSetVMListResult) virtualMachineScaleSetVMListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !vmssvlr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(vmssvlr.NextLink)))
-}
-
-// VirtualMachineScaleSetVMListResultPage contains a page of VirtualMachineScaleSetVM values.
-type VirtualMachineScaleSetVMListResultPage struct {
- fn func(context.Context, VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error)
- vmssvlr VirtualMachineScaleSetVMListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *VirtualMachineScaleSetVMListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.vmssvlr)
- if err != nil {
- return err
- }
- page.vmssvlr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *VirtualMachineScaleSetVMListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page VirtualMachineScaleSetVMListResultPage) NotDone() bool {
- return !page.vmssvlr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page VirtualMachineScaleSetVMListResultPage) Response() VirtualMachineScaleSetVMListResult {
- return page.vmssvlr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page VirtualMachineScaleSetVMListResultPage) Values() []VirtualMachineScaleSetVM {
- if page.vmssvlr.IsEmpty() {
- return nil
- }
- return *page.vmssvlr.Value
-}
-
-// Creates a new instance of the VirtualMachineScaleSetVMListResultPage type.
-func NewVirtualMachineScaleSetVMListResultPage(cur VirtualMachineScaleSetVMListResult, getNextPage func(context.Context, VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error)) VirtualMachineScaleSetVMListResultPage {
- return VirtualMachineScaleSetVMListResultPage{
- fn: getNextPage,
- vmssvlr: cur,
- }
-}
-
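The iterator and page pair deleted above implement the standard autorest pagination contract (NotDone / Value / NextWithContext, with the page callback following nextLink). A minimal consumption sketch, assuming the track-1 compute client these types shipped with; the import path and API version below are illustrative, not taken from this patch:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

// printScaleSetVMs walks every VM in a scale set, page by page.
func printScaleSetVMs(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient, resourceGroup, vmssName string) error {
	// ListComplete returns a VirtualMachineScaleSetVMListResultIterator that
	// follows NextLink transparently as the caller advances.
	iter, err := client.ListComplete(ctx, resourceGroup, vmssName, "", "", "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		vm := iter.Value()
		if vm.InstanceID != nil {
			fmt.Println(*vm.InstanceID)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}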
-// VirtualMachineScaleSetVMNetworkProfileConfiguration describes a virtual machine scale set VM network
-// profile.
-type VirtualMachineScaleSetVMNetworkProfileConfiguration struct {
- // NetworkInterfaceConfigurations - The list of network configurations.
- NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
-}
-
-// VirtualMachineScaleSetVMProfile describes a virtual machine scale set virtual machine profile.
-type VirtualMachineScaleSetVMProfile struct {
- // OsProfile - Specifies the operating system settings for the virtual machines in the scale set.
- OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"`
- // StorageProfile - Specifies the storage settings for the virtual machine disks.
- StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"`
- // NetworkProfile - Specifies properties of the network interfaces of the virtual machines in the scale set.
- NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"`
- // SecurityProfile - Specifies the Security related profile settings for the virtual machines in the scale set.
- SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
- // DiagnosticsProfile - Specifies the boot diagnostic settings state.
- // Minimum api-version: 2015-06-15.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // ExtensionProfile - Specifies a collection of settings for extensions installed on virtual machines in the scale set.
- ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
- // LicenseType - Specifies that the image or disk that is being used was licensed on-premises.
- // Possible values for Windows Server operating system are:
- // Windows_Client
- // Windows_Server
- // Possible values for Linux Server operating system are:
- // RHEL_BYOS (for RHEL)
- // SLES_BYOS (for SUSE)
- // For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing)
- // [Azure Hybrid Use Benefit for Linux Server](https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux)
- // Minimum api-version: 2015-06-15
- LicenseType *string `json:"licenseType,omitempty"`
- // Priority - Specifies the priority for the virtual machines in the scale set.
- // Minimum api-version: 2017-10-30-preview. Possible values include: 'VirtualMachinePriorityTypesRegular', 'VirtualMachinePriorityTypesLow', 'VirtualMachinePriorityTypesSpot'
- Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
- // EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set.
- // For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
- // For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'VirtualMachineEvictionPolicyTypesDeallocate', 'VirtualMachineEvictionPolicyTypesDelete'
- EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
- // BillingProfile - Specifies the billing related details of a Azure Spot VMSS.
- // Minimum api-version: 2019-03-01.
- BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
- // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
- ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
- // UserData - UserData for the virtual machines in the scale set, which must be base-64 encoded. Customer should not pass any secrets in here.
- // Minimum api-version: 2021-03-01
- UserData *string `json:"userData,omitempty"`
- // CapacityReservation - Specifies the capacity reservation related details of a scale set.
- // Minimum api-version: 2021-04-01.
- CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"`
- // ApplicationProfile - Specifies the gallery applications that should be made available to the VM/VMSS
- ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"`
-}
-
-// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual
-// machine.
-type VirtualMachineScaleSetVMProperties struct {
- // LatestModelApplied - READ-ONLY; Specifies whether the latest model has been applied to the virtual machine.
- LatestModelApplied *bool `json:"latestModelApplied,omitempty"`
- // VMID - READ-ONLY; Azure VM unique ID.
- VMID *string `json:"vmId,omitempty"`
- // InstanceView - READ-ONLY; The virtual machine instance view.
- InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty"`
- // HardwareProfile - Specifies the hardware settings for the virtual machine.
- HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
- // StorageProfile - Specifies the storage settings for the virtual machine disks.
- StorageProfile *StorageProfile `json:"storageProfile,omitempty"`
- // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the virtual machine has the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
- AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
- // OsProfile - Specifies the operating system settings for the virtual machine.
- OsProfile *OSProfile `json:"osProfile,omitempty"`
- // SecurityProfile - Specifies the Security related profile settings for the virtual machine.
- SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
- // NetworkProfile - Specifies the network interfaces of the virtual machine.
- NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
- // NetworkProfileConfiguration - Specifies the network profile configuration of the virtual machine.
- NetworkProfileConfiguration *VirtualMachineScaleSetVMNetworkProfileConfiguration `json:"networkProfileConfiguration,omitempty"`
- // DiagnosticsProfile - Specifies the boot diagnostic settings state.
- // Minimum api-version: 2015-06-15.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Availability sets overview](https://docs.microsoft.com/azure/virtual-machines/availability-set-overview).
- // For more information on Azure planned maintenance, see [Maintenance and updates for Virtual Machines in Azure](https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates)
- // Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
- AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // LicenseType - Specifies that the image or disk that is being used was licensed on-premises.
- // Possible values for Windows Server operating system are:
- // Windows_Client
- // Windows_Server
- // Possible values for Linux Server operating system are:
- // RHEL_BYOS (for RHEL)
- // SLES_BYOS (for SUSE)
- // For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing)
- // [Azure Hybrid Use Benefit for Linux Server](https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux)
- // Minimum api-version: 2015-06-15
- LicenseType *string `json:"licenseType,omitempty"`
- // ModelDefinitionApplied - READ-ONLY; Specifies whether the model applied to the virtual machine is the model of the virtual machine scale set or the customized model for the virtual machine.
- ModelDefinitionApplied *string `json:"modelDefinitionApplied,omitempty"`
- // ProtectionPolicy - Specifies the protection policy of the virtual machine.
- ProtectionPolicy *VirtualMachineScaleSetVMProtectionPolicy `json:"protectionPolicy,omitempty"`
- // UserData - UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here.
- // Minimum api-version: 2021-03-01
- UserData *string `json:"userData,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVMProperties.
-func (vmssvp VirtualMachineScaleSetVMProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmssvp.HardwareProfile != nil {
- objectMap["hardwareProfile"] = vmssvp.HardwareProfile
- }
- if vmssvp.StorageProfile != nil {
- objectMap["storageProfile"] = vmssvp.StorageProfile
- }
- if vmssvp.AdditionalCapabilities != nil {
- objectMap["additionalCapabilities"] = vmssvp.AdditionalCapabilities
- }
- if vmssvp.OsProfile != nil {
- objectMap["osProfile"] = vmssvp.OsProfile
- }
- if vmssvp.SecurityProfile != nil {
- objectMap["securityProfile"] = vmssvp.SecurityProfile
- }
- if vmssvp.NetworkProfile != nil {
- objectMap["networkProfile"] = vmssvp.NetworkProfile
- }
- if vmssvp.NetworkProfileConfiguration != nil {
- objectMap["networkProfileConfiguration"] = vmssvp.NetworkProfileConfiguration
- }
- if vmssvp.DiagnosticsProfile != nil {
- objectMap["diagnosticsProfile"] = vmssvp.DiagnosticsProfile
- }
- if vmssvp.AvailabilitySet != nil {
- objectMap["availabilitySet"] = vmssvp.AvailabilitySet
- }
- if vmssvp.LicenseType != nil {
- objectMap["licenseType"] = vmssvp.LicenseType
- }
- if vmssvp.ProtectionPolicy != nil {
- objectMap["protectionPolicy"] = vmssvp.ProtectionPolicy
- }
- if vmssvp.UserData != nil {
- objectMap["userData"] = vmssvp.UserData
- }
- return json.Marshal(objectMap)
-}
-
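The custom marshaler above exists so that READ-ONLY properties (LatestModelApplied, VMID, InstanceView, ProvisioningState, ModelDefinitionApplied) are never echoed back to the service on create or update calls. A small sketch of the observable effect, assuming the same track-1 package (path illustrative) and the go-autorest "to" helpers:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	props := compute.VirtualMachineScaleSetVMProperties{
		VMID:        to.StringPtr("11111111-2222-3333-4444-555555555555"), // READ-ONLY: dropped by MarshalJSON
		LicenseType: to.StringPtr("Windows_Server"),                       // writable: kept
	}
	b, err := json.Marshal(props) // dispatches to the custom MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"licenseType":"Windows_Server"}
}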
-// VirtualMachineScaleSetVMProtectionPolicy the protection policy of a virtual machine scale set VM.
-type VirtualMachineScaleSetVMProtectionPolicy struct {
- // ProtectFromScaleIn - Indicates that the virtual machine scale set VM shouldn't be considered for deletion during a scale-in operation.
- ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty"`
- // ProtectFromScaleSetActions - Indicates that model updates or actions (including scale-in) initiated on the virtual machine scale set should not be applied to the virtual machine scale set VM.
- ProtectFromScaleSetActions *bool `json:"protectFromScaleSetActions,omitempty"`
-}
-
-// VirtualMachineScaleSetVMReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters.
-type VirtualMachineScaleSetVMReimageParameters struct {
- // TempDisk - Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported for VM/VMSS with Ephemeral OS disk.
- TempDisk *bool `json:"tempDisk,omitempty"`
-}
-
-// VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMRunCommandsClient) (VirtualMachineRunCommand, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture.Result.
-func (future *VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmrc.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent {
- vmrc, err = client.CreateOrUpdateResponder(vmrc.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetVMRunCommandsDeleteFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineScaleSetVMRunCommandsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMRunCommandsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMRunCommandsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMRunCommandsDeleteFuture.Result.
-func (future *VirtualMachineScaleSetVMRunCommandsDeleteFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMRunCommandsUpdateFuture an abstraction for monitoring and retrieving the results
-// of a long-running operation.
-type VirtualMachineScaleSetVMRunCommandsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMRunCommandsClient) (VirtualMachineRunCommand, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMRunCommandsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMRunCommandsUpdateFuture.Result.
-func (future *VirtualMachineScaleSetVMRunCommandsUpdateFuture) result(client VirtualMachineScaleSetVMRunCommandsClient) (vmrc VirtualMachineRunCommand, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmrc.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmrc.Response.Response, err = future.GetResult(sender); err == nil && vmrc.Response.Response.StatusCode != http.StatusNoContent {
- vmrc, err = client.UpdateResponder(vmrc.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsUpdateFuture", "Result", vmrc.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsDeallocateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsDeallocateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsDeallocateFuture.Result.
-func (future *VirtualMachineScaleSetVMsDeallocateFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsDeleteFuture.Result.
-func (future *VirtualMachineScaleSetVMsDeleteFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the
-// results of a long-running operation.
-type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsPerformMaintenanceFuture.Result.
-func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsPowerOffFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsPowerOffFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsPowerOffFuture.Result.
-func (future *VirtualMachineScaleSetVMsPowerOffFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsRedeployFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsRedeployFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsRedeployFuture.Result.
-func (future *VirtualMachineScaleSetVMsRedeployFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsReimageAllFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsReimageAllFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsReimageAllFuture.Result.
-func (future *VirtualMachineScaleSetVMsReimageAllFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsReimageFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsReimageFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsReimageFuture.Result.
-func (future *VirtualMachineScaleSetVMsReimageFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsRestartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsRestartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsRestartFuture.Result.
-func (future *VirtualMachineScaleSetVMsRestartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsRunCommandFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (RunCommandResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsRunCommandFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsRunCommandFuture.Result.
-func (future *VirtualMachineScaleSetVMsRunCommandFuture) result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- rcr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent {
- rcr, err = client.RunCommandResponder(rcr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsStartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsStartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsStartFuture.Result.
-func (future *VirtualMachineScaleSetVMsStartFuture) result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachineScaleSetVMsUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachineScaleSetVMsClient) (VirtualMachineScaleSetVM, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachineScaleSetVMsUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachineScaleSetVMsUpdateFuture.Result.
-func (future *VirtualMachineScaleSetVMsUpdateFuture) result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmssv.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmssv.Response.Response, err = future.GetResult(sender); err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent {
- vmssv, err = client.UpdateResponder(vmssv.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
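All of the *Future types above wrap the same long-running-operation flow: the client call returns immediately with a future, WaitForCompletionRef polls until the operation settles, and Result re-fetches and decodes the terminal payload. A sketch of that flow for VirtualMachineScaleSetVMsUpdateFuture, again assuming the track-1 client with an illustrative import path:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-03-01/compute"
)

// updateInstance applies an updated model to a single scale-set VM and waits for the result.
func updateInstance(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient, rg, vmss, instanceID string, vm compute.VirtualMachineScaleSetVM) error {
	// Update returns a future describing the in-flight operation.
	future, err := client.Update(ctx, rg, vmss, instanceID, vm)
	if err != nil {
		return err
	}
	// Block until the service reports the operation as done (or ctx expires).
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Result performs the final GET and unmarshals the updated VM.
	updated, err := future.Result(client)
	if err != nil {
		return err
	}
	if updated.Name != nil {
		fmt.Println(*updated.Name)
	}
	return nil
}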
-// VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesCaptureFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (VirtualMachineCaptureResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesCaptureFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesCaptureFuture.Result.
-func (future *VirtualMachinesCaptureFuture) result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmcr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmcr.Response.Response, err = future.GetResult(sender); err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent {
- vmcr, err = client.CaptureResponder(vmcr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesConvertToManagedDisksFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesConvertToManagedDisksFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesConvertToManagedDisksFuture.Result.
-func (future *VirtualMachinesConvertToManagedDisksFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesCreateOrUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (VirtualMachine, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesCreateOrUpdateFuture.Result.
-func (future *VirtualMachinesCreateOrUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- VM.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent {
- VM, err = client.CreateOrUpdateResponder(VM.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesDeallocateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesDeallocateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesDeallocateFuture.Result.
-func (future *VirtualMachinesDeallocateFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesDeleteFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesDeleteFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesDeleteFuture.Result.
-func (future *VirtualMachinesDeleteFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesInstallPatchesFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesInstallPatchesFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (VirtualMachineInstallPatchesResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesInstallPatchesFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesInstallPatchesFuture.Result.
-func (future *VirtualMachinesInstallPatchesFuture) result(client VirtualMachinesClient) (vmipr VirtualMachineInstallPatchesResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesInstallPatchesFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- vmipr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesInstallPatchesFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if vmipr.Response.Response, err = future.GetResult(sender); err == nil && vmipr.Response.Response.StatusCode != http.StatusNoContent {
- vmipr, err = client.InstallPatchesResponder(vmipr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesInstallPatchesFuture", "Result", vmipr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineSize describes the properties of a VM size.
-type VirtualMachineSize struct {
- // Name - The name of the virtual machine size.
- Name *string `json:"name,omitempty"`
- // NumberOfCores - The number of cores supported by the virtual machine size. For Constrained vCPU capable VM sizes, this number represents the total vCPUs of quota that the VM uses. For accurate vCPU count, please refer to https://docs.microsoft.com/azure/virtual-machines/constrained-vcpu or https://docs.microsoft.com/rest/api/compute/resourceskus/list
- NumberOfCores *int32 `json:"numberOfCores,omitempty"`
- // OsDiskSizeInMB - The OS disk size, in MB, allowed by the virtual machine size.
- OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"`
- // ResourceDiskSizeInMB - The resource disk size, in MB, allowed by the virtual machine size.
- ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"`
- // MemoryInMB - The amount of memory, in MB, supported by the virtual machine size.
- MemoryInMB *int32 `json:"memoryInMB,omitempty"`
- // MaxDataDiskCount - The maximum number of data disks that can be attached to the virtual machine size.
- MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"`
-}
-
-// VirtualMachineSizeListResult the List Virtual Machine operation response.
-type VirtualMachineSizeListResult struct {
- autorest.Response `json:"-"`
- // Value - The list of virtual machine sizes.
- Value *[]VirtualMachineSize `json:"value,omitempty"`
-}
-
-// VirtualMachineSoftwarePatchProperties describes the properties of a Virtual Machine software patch.
-type VirtualMachineSoftwarePatchProperties struct {
- // PatchID - READ-ONLY; A unique identifier for the patch.
- PatchID *string `json:"patchId,omitempty"`
- // Name - READ-ONLY; The friendly name of the patch.
- Name *string `json:"name,omitempty"`
- // Version - READ-ONLY; The version number of the patch. This property applies only to Linux patches.
- Version *string `json:"version,omitempty"`
- // KbID - READ-ONLY; The KBID of the patch. Only applies to Windows patches.
- KbID *string `json:"kbId,omitempty"`
- // Classifications - READ-ONLY; The classification(s) of the patch as provided by the patch publisher.
- Classifications *[]string `json:"classifications,omitempty"`
- // RebootBehavior - READ-ONLY; Describes the reboot requirements of the patch. Possible values include: 'VMGuestPatchRebootBehaviorUnknown', 'VMGuestPatchRebootBehaviorNeverReboots', 'VMGuestPatchRebootBehaviorAlwaysRequiresReboot', 'VMGuestPatchRebootBehaviorCanRequestReboot'
- RebootBehavior VMGuestPatchRebootBehavior `json:"rebootBehavior,omitempty"`
- // ActivityID - READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension logs.
- ActivityID *string `json:"activityId,omitempty"`
- // PublishedDate - READ-ONLY; The UTC timestamp when the repository published this patch.
- PublishedDate *date.Time `json:"publishedDate,omitempty"`
- // LastModifiedDateTime - READ-ONLY; The UTC timestamp of the last update to this patch record.
- LastModifiedDateTime *date.Time `json:"lastModifiedDateTime,omitempty"`
- // AssessmentState - READ-ONLY; Describes the availability of a given patch. Possible values include: 'PatchAssessmentStateUnknown', 'PatchAssessmentStateAvailable'
- AssessmentState PatchAssessmentState `json:"assessmentState,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineSoftwarePatchProperties.
-func (vmspp VirtualMachineSoftwarePatchProperties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachinesPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesPerformMaintenanceFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesPerformMaintenanceFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesPerformMaintenanceFuture.Result.
-func (future *VirtualMachinesPerformMaintenanceFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesPowerOffFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesPowerOffFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesPowerOffFuture.Result.
-func (future *VirtualMachinesPowerOffFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesReapplyFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesReapplyFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesReapplyFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesReapplyFuture.Result.
-func (future *VirtualMachinesReapplyFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReapplyFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReapplyFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesRedeployFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesRedeployFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesRedeployFuture.Result.
-func (future *VirtualMachinesRedeployFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesReimageFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesReimageFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesReimageFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesReimageFuture.Result.
-func (future *VirtualMachinesReimageFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesRestartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesRestartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesRestartFuture.Result.
-func (future *VirtualMachinesRestartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type VirtualMachinesRunCommandFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (RunCommandResult, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesRunCommandFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesRunCommandFuture.Result.
-func (future *VirtualMachinesRunCommandFuture) result(client VirtualMachinesClient) (rcr RunCommandResult, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- rcr.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent {
- rcr, err = client.RunCommandResponder(rcr.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesStartFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (autorest.Response, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesStartFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesStartFuture.Result.
-func (future *VirtualMachinesStartFuture) result(client VirtualMachinesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- ar.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
-// VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view
-// status summary.
-type VirtualMachineStatusCodeCount struct {
- // Code - READ-ONLY; The instance view status code.
- Code *string `json:"code,omitempty"`
- // Count - READ-ONLY; The number of instances having a particular status code.
- Count *int32 `json:"count,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineStatusCodeCount.
-func (vmscc VirtualMachineStatusCodeCount) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// VirtualMachinesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type VirtualMachinesUpdateFuture struct {
- azure.FutureAPI
- // Result returns the result of the asynchronous operation.
- // If the operation has not completed it will return an error.
- Result func(VirtualMachinesClient) (VirtualMachine, error)
-}
-
-// UnmarshalJSON is the custom unmarshaller for CreateFuture.
-func (future *VirtualMachinesUpdateFuture) UnmarshalJSON(body []byte) error {
- var azFuture azure.Future
- if err := json.Unmarshal(body, &azFuture); err != nil {
- return err
- }
- future.FutureAPI = &azFuture
- future.Result = future.result
- return nil
-}
-
-// result is the default implementation for VirtualMachinesUpdateFuture.Result.
-func (future *VirtualMachinesUpdateFuture) result(client VirtualMachinesClient) (VM VirtualMachine, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- VM.Response.Response = future.Response()
- err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture")
- return
- }
- sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent {
- VM, err = client.UpdateResponder(VM.Response.Response)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request")
- }
- }
- return
-}
-
-// VirtualMachineUpdate describes a Virtual Machine Update.
-type VirtualMachineUpdate struct {
- // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
- Plan *Plan `json:"plan,omitempty"`
- *VirtualMachineProperties `json:"properties,omitempty"`
- // Identity - The identity of the virtual machine, if configured.
- Identity *VirtualMachineIdentity `json:"identity,omitempty"`
- // Zones - The virtual machine zones.
- Zones *[]string `json:"zones,omitempty"`
- // Tags - Resource tags
- Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for VirtualMachineUpdate.
-func (vmu VirtualMachineUpdate) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if vmu.Plan != nil {
- objectMap["plan"] = vmu.Plan
- }
- if vmu.VirtualMachineProperties != nil {
- objectMap["properties"] = vmu.VirtualMachineProperties
- }
- if vmu.Identity != nil {
- objectMap["identity"] = vmu.Identity
- }
- if vmu.Zones != nil {
- objectMap["zones"] = vmu.Zones
- }
- if vmu.Tags != nil {
- objectMap["tags"] = vmu.Tags
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for VirtualMachineUpdate struct.
-func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "plan":
- if v != nil {
- var plan Plan
- err = json.Unmarshal(*v, &plan)
- if err != nil {
- return err
- }
- vmu.Plan = &plan
- }
- case "properties":
- if v != nil {
- var virtualMachineProperties VirtualMachineProperties
- err = json.Unmarshal(*v, &virtualMachineProperties)
- if err != nil {
- return err
- }
- vmu.VirtualMachineProperties = &virtualMachineProperties
- }
- case "identity":
- if v != nil {
- var identity VirtualMachineIdentity
- err = json.Unmarshal(*v, &identity)
- if err != nil {
- return err
- }
- vmu.Identity = &identity
- }
- case "zones":
- if v != nil {
- var zones []string
- err = json.Unmarshal(*v, &zones)
- if err != nil {
- return err
- }
- vmu.Zones = &zones
- }
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- vmu.Tags = tags
- }
- }
- }
-
- return nil
-}
-
-// VMGalleryApplication specifies the required information to reference a compute gallery application
-// version
-type VMGalleryApplication struct {
- // Tags - Optional, Specifies a passthrough value for more generic context.
- Tags *string `json:"tags,omitempty"`
- // Order - Optional, Specifies the order in which the packages have to be installed
- Order *int32 `json:"order,omitempty"`
- // PackageReferenceID - Specifies the GalleryApplicationVersion resource id on the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{application}/versions/{version}
- PackageReferenceID *string `json:"packageReferenceId,omitempty"`
- // ConfigurationReference - Optional, Specifies the uri to an azure blob that will replace the default configuration for the package if provided
- ConfigurationReference *string `json:"configurationReference,omitempty"`
-}
-
-// VMScaleSetConvertToSinglePlacementGroupInput ...
-type VMScaleSetConvertToSinglePlacementGroupInput struct {
- // ActivePlacementGroupID - Id of the placement group in which you want future virtual machine instances to be placed. To query placement group Id, please use Virtual Machine Scale Set VMs - Get API. If not provided, the platform will choose one with maximum number of virtual machine instances.
- ActivePlacementGroupID *string `json:"activePlacementGroupId,omitempty"`
-}
-
-// VMSizeProperties specifies VM Size Property settings on the virtual machine.
-type VMSizeProperties struct {
-	// VCPUsAvailable - Specifies the number of vCPUs available for the VM. When this property is not specified in the request body the default behavior is to set it to the value of vCPUs available for that VM size exposed in api response of [List all available virtual machine sizes in a region](https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list) .
- VCPUsAvailable *int32 `json:"vCPUsAvailable,omitempty"`
-	// VCPUsPerCore - Specifies the vCPU to physical core ratio. When this property is not specified in the request body the default behavior is set to the value of vCPUsPerCore for the VM Size exposed in api response of [List all available virtual machine sizes in a region](https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list). Setting this property to 1 also means that hyper-threading is disabled.
- VCPUsPerCore *int32 `json:"vCPUsPerCore,omitempty"`
-}
-
-// WindowsConfiguration specifies Windows operating system settings on the virtual machine.
-type WindowsConfiguration struct {
-	// ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later.
- ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"`
-	// EnableAutomaticUpdates - Indicates whether Automatic Updates is enabled for the Windows virtual machine. Default value is true. For virtual machine scale sets, this property can be updated and updates will take effect on OS reprovisioning.
- EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"`
-	// TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time". Possible values can be [TimeZoneInfo.Id](https://docs.microsoft.com/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id) value from time zones returned by [TimeZoneInfo.GetSystemTimeZones](https://docs.microsoft.com/dotnet/api/system.timezoneinfo.getsystemtimezones).
- TimeZone *string `json:"timeZone,omitempty"`
- // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup.
- AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"`
- // PatchSettings - [Preview Feature] Specifies settings related to VM Guest Patching on Windows.
- PatchSettings *PatchSettings `json:"patchSettings,omitempty"`
- // WinRM - Specifies the Windows Remote Management listeners. This enables remote Windows PowerShell.
- WinRM *WinRMConfiguration `json:"winRM,omitempty"`
-}
-
-// WindowsParameters input for InstallPatches on a Windows VM, as directly received by the API
-type WindowsParameters struct {
- // ClassificationsToInclude - The update classifications to select when installing patches for Windows.
- ClassificationsToInclude *[]VMGuestPatchClassificationWindows `json:"classificationsToInclude,omitempty"`
- // KbNumbersToInclude - Kbs to include in the patch operation
- KbNumbersToInclude *[]string `json:"kbNumbersToInclude,omitempty"`
- // KbNumbersToExclude - Kbs to exclude in the patch operation
- KbNumbersToExclude *[]string `json:"kbNumbersToExclude,omitempty"`
- // ExcludeKbsRequiringReboot - Filters out Kbs that don't have an InstallationRebootBehavior of 'NeverReboots' when this is set to true.
- ExcludeKbsRequiringReboot *bool `json:"excludeKbsRequiringReboot,omitempty"`
- // MaxPatchPublishDate - This is used to install patches that were published on or before this given max published date.
- MaxPatchPublishDate *date.Time `json:"maxPatchPublishDate,omitempty"`
-}
-
-// WinRMConfiguration describes Windows Remote Management configuration of the VM
-type WinRMConfiguration struct {
- // Listeners - The list of Windows Remote Management listeners
- Listeners *[]WinRMListener `json:"listeners,omitempty"`
-}
-
-// WinRMListener describes Protocol and thumbprint of Windows Remote Management listener
-type WinRMListener struct {
-	// Protocol - Specifies the protocol of WinRM listener. Possible values are: **http**, **https**. Possible values include: 'ProtocolTypesHTTP', 'ProtocolTypesHTTPS'
- Protocol ProtocolTypes `json:"protocol,omitempty"`
-	// CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8: { "data":"", "dataType":"pfx", "password":"" } To install certificates on a virtual machine it is recommended to use the [Azure Key Vault virtual machine extension for Linux](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux) or the [Azure Key Vault virtual machine extension for Windows](https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows).
- CertificateURL *string `json:"certificateUrl,omitempty"`
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go
deleted file mode 100644
index b76ee91bfdf0..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/operations.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// OperationsClient is the compute Client
-type OperationsClient struct {
- BaseClient
-}
-
-// NewOperationsClient creates an instance of the OperationsClient client.
-func NewOperationsClient(subscriptionID string) OperationsClient {
- return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this
-// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
- return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List gets a list of compute operations.
-func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPath("/providers/Microsoft.Compute/operations"),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go
deleted file mode 100644
index 8144933b91db..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/proximityplacementgroups.go
+++ /dev/null
@@ -1,575 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// ProximityPlacementGroupsClient is the compute Client
-type ProximityPlacementGroupsClient struct {
- BaseClient
-}
-
-// NewProximityPlacementGroupsClient creates an instance of the ProximityPlacementGroupsClient client.
-func NewProximityPlacementGroupsClient(subscriptionID string) ProximityPlacementGroupsClient {
- return NewProximityPlacementGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewProximityPlacementGroupsClientWithBaseURI creates an instance of the ProximityPlacementGroupsClient client using
-// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewProximityPlacementGroupsClientWithBaseURI(baseURI string, subscriptionID string) ProximityPlacementGroupsClient {
- return ProximityPlacementGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate create or update a proximity placement group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// proximityPlacementGroupName - the name of the proximity placement group.
-// parameters - parameters supplied to the Create Proximity Placement Group operation.
-func (client ProximityPlacementGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroup) (result ProximityPlacementGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, proximityPlacementGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "CreateOrUpdate", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client ProximityPlacementGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroup) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "proximityPlacementGroupName": autorest.Encode("path", proximityPlacementGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ProximityPlacementGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete a proximity placement group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// proximityPlacementGroupName - the name of the proximity placement group.
-func (client ProximityPlacementGroupsClient) Delete(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.Delete")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, proximityPlacementGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.DeleteSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Delete", resp, "Failure sending request")
- return
- }
-
- result, err = client.DeleteResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Delete", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client ProximityPlacementGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "proximityPlacementGroupName": autorest.Encode("path", proximityPlacementGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about a proximity placement group .
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// proximityPlacementGroupName - the name of the proximity placement group.
-// includeColocationStatus - includeColocationStatus=true enables fetching the colocation status of all the
-// resources in the proximity placement group.
-func (client ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, includeColocationStatus string) (result ProximityPlacementGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, proximityPlacementGroupName, includeColocationStatus)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, includeColocationStatus string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "proximityPlacementGroupName": autorest.Encode("path", proximityPlacementGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(includeColocationStatus) > 0 {
- queryParameters["includeColocationStatus"] = autorest.Encode("query", includeColocationStatus)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) GetResponder(resp *http.Response) (result ProximityPlacementGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByResourceGroup lists all proximity placement groups in a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client ProximityPlacementGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ProximityPlacementGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.ppglr.Response.Response != nil {
- sc = result.ppglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.ppglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.ppglr, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.ppglr.hasNextLink() && result.ppglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client ProximityPlacementGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result ProximityPlacementGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client ProximityPlacementGroupsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ProximityPlacementGroupListResult) (result ProximityPlacementGroupListResult, err error) {
- req, err := lastResults.proximityPlacementGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client ProximityPlacementGroupsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ProximityPlacementGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// ListBySubscription lists all proximity placement groups in a subscription.
-func (client ProximityPlacementGroupsClient) ListBySubscription(ctx context.Context) (result ProximityPlacementGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.ppglr.Response.Response != nil {
- sc = result.ppglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listBySubscriptionNextResults
- req, err := client.ListBySubscriptionPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListBySubscription", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.ppglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListBySubscription", resp, "Failure sending request")
- return
- }
-
- result.ppglr, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "ListBySubscription", resp, "Failure responding to request")
- return
- }
- if result.ppglr.hasNextLink() && result.ppglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListBySubscriptionPreparer prepares the ListBySubscription request.
-func (client ProximityPlacementGroupsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result ProximityPlacementGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client ProximityPlacementGroupsClient) listBySubscriptionNextResults(ctx context.Context, lastResults ProximityPlacementGroupListResult) (result ProximityPlacementGroupListResult, err error) {
- req, err := lastResults.proximityPlacementGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
-func (client ProximityPlacementGroupsClient) ListBySubscriptionComplete(ctx context.Context) (result ProximityPlacementGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListBySubscription(ctx)
- return
-}
-
-// Update update a proximity placement group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// proximityPlacementGroupName - the name of the proximity placement group.
-// parameters - parameters supplied to the Update Proximity Placement Group operation.
-func (client ProximityPlacementGroupsClient) Update(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroupUpdate) (result ProximityPlacementGroup, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, proximityPlacementGroupName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client ProximityPlacementGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroupUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "proximityPlacementGroupName": autorest.Encode("path", proximityPlacementGroupName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client ProximityPlacementGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client ProximityPlacementGroupsClient) UpdateResponder(resp *http.Response) (result ProximityPlacementGroup, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go
deleted file mode 100644
index 35266973811b..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/resourceskus.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// ResourceSkusClient is the compute Client
-type ResourceSkusClient struct {
- BaseClient
-}
-
-// NewResourceSkusClient creates an instance of the ResourceSkusClient client.
-func NewResourceSkusClient(subscriptionID string) ResourceSkusClient {
- return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client using a custom endpoint. Use
-// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient {
- return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List gets the list of Microsoft.Compute SKUs available for your Subscription.
-// Parameters:
-// filter - the filter to apply on the operation. Only **location** filter is supported currently.
-// includeExtendedLocations - to Include Extended Locations information or not in the response.
-func (client ResourceSkusClient) List(ctx context.Context, filter string, includeExtendedLocations string) (result ResourceSkusResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List")
- defer func() {
- sc := -1
- if result.rsr.Response.Response != nil {
- sc = result.rsr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, filter, includeExtendedLocations)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.rsr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request")
- return
- }
-
- result.rsr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request")
- return
- }
- if result.rsr.hasNextLink() && result.rsr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client ResourceSkusClient) ListPreparer(ctx context.Context, filter string, includeExtendedLocations string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(filter) > 0 {
- queryParameters["$filter"] = autorest.Encode("query", filter)
- }
- if len(includeExtendedLocations) > 0 {
- queryParameters["includeExtendedLocations"] = autorest.Encode("query", includeExtendedLocations)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResults ResourceSkusResult) (result ResourceSkusResult, err error) {
- req, err := lastResults.resourceSkusResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client ResourceSkusClient) ListComplete(ctx context.Context, filter string, includeExtendedLocations string) (result ResourceSkusResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, filter, includeExtendedLocations)
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go
deleted file mode 100644
index ea9d27484cef..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepointcollections.go
+++ /dev/null
@@ -1,582 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// RestorePointCollectionsClient is the compute Client
-type RestorePointCollectionsClient struct {
- BaseClient
-}
-
-// NewRestorePointCollectionsClient creates an instance of the RestorePointCollectionsClient client.
-func NewRestorePointCollectionsClient(subscriptionID string) RestorePointCollectionsClient {
- return NewRestorePointCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewRestorePointCollectionsClientWithBaseURI creates an instance of the RestorePointCollectionsClient client using a
-// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
-// Azure stack).
-func NewRestorePointCollectionsClientWithBaseURI(baseURI string, subscriptionID string) RestorePointCollectionsClient {
- return RestorePointCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update the restore point collection. Please refer to
-// https://aka.ms/RestorePoints for more details. When updating a restore point collection, only tags may be modified.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection.
-// parameters - parameters supplied to the Create or Update restore point collection operation.
-func (client RestorePointCollectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection) (result RestorePointCollection, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, restorePointCollectionName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "CreateOrUpdate", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client RestorePointCollectionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) CreateOrUpdateResponder(resp *http.Response) (result RestorePointCollection, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the restore point collection. This operation will also delete all the contained
-// restore points.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the Restore Point Collection.
-func (client RestorePointCollectionsClient) Delete(ctx context.Context, resourceGroupName string, restorePointCollectionName string) (result RestorePointCollectionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, restorePointCollectionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client RestorePointCollectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) DeleteSender(req *http.Request) (future RestorePointCollectionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the restore point collection.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection.
-// expand - the expand expression to apply on the operation. If expand=restorePoints, server will return all
-// contained restore points in the restorePointCollection.
-func (client RestorePointCollectionsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, expand RestorePointCollectionExpandOptions) (result RestorePointCollection, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client RestorePointCollectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, expand RestorePointCollectionExpandOptions) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) GetResponder(resp *http.Response) (result RestorePointCollection, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets the list of restore point collections in a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client RestorePointCollectionsClient) List(ctx context.Context, resourceGroupName string) (result RestorePointCollectionListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.List")
- defer func() {
- sc := -1
- if result.rpclr.Response.Response != nil {
- sc = result.rpclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.rpclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.rpclr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.rpclr.hasNextLink() && result.rpclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client RestorePointCollectionsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) ListResponder(resp *http.Response) (result RestorePointCollectionListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client RestorePointCollectionsClient) listNextResults(ctx context.Context, lastResults RestorePointCollectionListResult) (result RestorePointCollectionListResult, err error) {
- req, err := lastResults.restorePointCollectionListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client RestorePointCollectionsClient) ListComplete(ctx context.Context, resourceGroupName string) (result RestorePointCollectionListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName)
- return
-}
-
-// ListAll gets the list of restore point collections in the subscription. Use nextLink property in the response to get
-// the next page of restore point collections. Do this till nextLink is not null to fetch all the restore point
-// collections.
-func (client RestorePointCollectionsClient) ListAll(ctx context.Context) (result RestorePointCollectionListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.ListAll")
- defer func() {
- sc := -1
- if result.rpclr.Response.Response != nil {
- sc = result.rpclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listAllNextResults
- req, err := client.ListAllPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.rpclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", resp, "Failure sending request")
- return
- }
-
- result.rpclr, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "ListAll", resp, "Failure responding to request")
- return
- }
- if result.rpclr.hasNextLink() && result.rpclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListAllPreparer prepares the ListAll request.
-func (client RestorePointCollectionsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/restorePointCollections", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAllSender sends the ListAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) ListAllSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAllResponder handles the response to the ListAll request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) ListAllResponder(resp *http.Response) (result RestorePointCollectionListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listAllNextResults retrieves the next set of results, if any.
-func (client RestorePointCollectionsClient) listAllNextResults(ctx context.Context, lastResults RestorePointCollectionListResult) (result RestorePointCollectionListResult, err error) {
- req, err := lastResults.restorePointCollectionListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "listAllNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
-func (client RestorePointCollectionsClient) ListAllComplete(ctx context.Context) (result RestorePointCollectionListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.ListAll")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListAll(ctx)
- return
-}
-
-// Update the operation to update the restore point collection.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection.
-// parameters - parameters supplied to the Update restore point collection operation.
-func (client RestorePointCollectionsClient) Update(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate) (result RestorePointCollection, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointCollectionsClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, restorePointCollectionName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointCollectionsClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client RestorePointCollectionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointCollectionsClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client RestorePointCollectionsClient) UpdateResponder(resp *http.Response) (result RestorePointCollection, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go
deleted file mode 100644
index ad79c4b0eec5..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/restorepoints.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// RestorePointsClient is the compute Client
-type RestorePointsClient struct {
- BaseClient
-}
-
-// NewRestorePointsClient creates an instance of the RestorePointsClient client.
-func NewRestorePointsClient(subscriptionID string) RestorePointsClient {
- return NewRestorePointsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewRestorePointsClientWithBaseURI creates an instance of the RestorePointsClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewRestorePointsClientWithBaseURI(baseURI string, subscriptionID string) RestorePointsClient {
- return RestorePointsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Create the operation to create the restore point. Updating properties of an existing restore point is not allowed
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection.
-// restorePointName - the name of the restore point.
-// parameters - parameters supplied to the Create restore point operation.
-func (client RestorePointsClient) Create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint) (result RestorePointsCreateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Create")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.RestorePointProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.RestorePointProperties.SourceMetadata.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- }},
- }},
- }},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.RestorePointsClient", "Create", err.Error())
- }
-
- req, err := client.CreatePreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Create", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Create", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreatePreparer prepares the Create request.
-func (client RestorePointsClient) CreatePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "restorePointName": autorest.Encode("path", restorePointName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateSender sends the Create request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointsClient) CreateSender(req *http.Request) (future RestorePointsCreateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateResponder handles the response to the Create request. The method always
-// closes the http.Response Body.
-func (client RestorePointsClient) CreateResponder(resp *http.Response) (result RestorePoint, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the restore point.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the Restore Point Collection.
-// restorePointName - the name of the restore point.
-func (client RestorePointsClient) Delete(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (result RestorePointsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client RestorePointsClient) DeletePreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "restorePointName": autorest.Encode("path", restorePointName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointsClient) DeleteSender(req *http.Request) (future RestorePointsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client RestorePointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the restore point.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// restorePointCollectionName - the name of the restore point collection.
-// restorePointName - the name of the restore point.
-func (client RestorePointsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (result RestorePoint, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, restorePointCollectionName, restorePointName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.RestorePointsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client RestorePointsClient) GetPreparer(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "restorePointCollectionName": autorest.Encode("path", restorePointCollectionName),
- "restorePointName": autorest.Encode("path", restorePointName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client RestorePointsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client RestorePointsClient) GetResponder(resp *http.Response) (result RestorePoint, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go
deleted file mode 100644
index 261ed0770ac0..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleries.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// SharedGalleriesClient is the compute Client
-type SharedGalleriesClient struct {
- BaseClient
-}
-
-// NewSharedGalleriesClient creates an instance of the SharedGalleriesClient client.
-func NewSharedGalleriesClient(subscriptionID string) SharedGalleriesClient {
- return NewSharedGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewSharedGalleriesClientWithBaseURI creates an instance of the SharedGalleriesClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewSharedGalleriesClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleriesClient {
- return SharedGalleriesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a shared gallery by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// galleryUniqueName - the unique name of the Shared Gallery.
-func (client SharedGalleriesClient) Get(ctx context.Context, location string, galleryUniqueName string) (result SharedGallery, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, galleryUniqueName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client SharedGalleriesClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryUniqueName": autorest.Encode("path", galleryUniqueName),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleriesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client SharedGalleriesClient) GetResponder(resp *http.Response) (result SharedGallery, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List list shared galleries by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations.
-func (client SharedGalleriesClient) List(ctx context.Context, location string, sharedTo SharedToValues) (result SharedGalleryListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.List")
- defer func() {
- sc := -1
- if result.sgl.Response.Response != nil {
- sc = result.sgl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, location, sharedTo)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.sgl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.sgl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.sgl.hasNextLink() && result.sgl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client SharedGalleriesClient) ListPreparer(ctx context.Context, location string, sharedTo SharedToValues) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(sharedTo)) > 0 {
- queryParameters["sharedTo"] = autorest.Encode("query", sharedTo)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleriesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client SharedGalleriesClient) ListResponder(resp *http.Response) (result SharedGalleryList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client SharedGalleriesClient) listNextResults(ctx context.Context, lastResults SharedGalleryList) (result SharedGalleryList, err error) {
- req, err := lastResults.sharedGalleryListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleriesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SharedGalleriesClient) ListComplete(ctx context.Context, location string, sharedTo SharedToValues) (result SharedGalleryListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleriesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, location, sharedTo)
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go
deleted file mode 100644
index 5378b4f89811..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimages.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// SharedGalleryImagesClient is the compute Client
-type SharedGalleryImagesClient struct {
- BaseClient
-}
-
-// NewSharedGalleryImagesClient creates an instance of the SharedGalleryImagesClient client.
-func NewSharedGalleryImagesClient(subscriptionID string) SharedGalleryImagesClient {
- return NewSharedGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewSharedGalleryImagesClientWithBaseURI creates an instance of the SharedGalleryImagesClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewSharedGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleryImagesClient {
- return SharedGalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a shared gallery image by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// galleryUniqueName - the unique name of the Shared Gallery.
-// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be
-// listed.
-func (client SharedGalleryImagesClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string) (result SharedGalleryImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, galleryUniqueName, galleryImageName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client SharedGalleryImagesClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryUniqueName": autorest.Encode("path", galleryUniqueName),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client SharedGalleryImagesClient) GetResponder(resp *http.Response) (result SharedGalleryImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List list shared gallery images by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// galleryUniqueName - the unique name of the Shared Gallery.
-// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations.
-func (client SharedGalleryImagesClient) List(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (result SharedGalleryImageListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.List")
- defer func() {
- sc := -1
- if result.sgil.Response.Response != nil {
- sc = result.sgil.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, location, galleryUniqueName, sharedTo)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.sgil.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.sgil, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.sgil.hasNextLink() && result.sgil.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client SharedGalleryImagesClient) ListPreparer(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryUniqueName": autorest.Encode("path", galleryUniqueName),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(sharedTo)) > 0 {
- queryParameters["sharedTo"] = autorest.Encode("query", sharedTo)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleryImagesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client SharedGalleryImagesClient) ListResponder(resp *http.Response) (result SharedGalleryImageList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client SharedGalleryImagesClient) listNextResults(ctx context.Context, lastResults SharedGalleryImageList) (result SharedGalleryImageList, err error) {
- req, err := lastResults.sharedGalleryImageListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImagesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SharedGalleryImagesClient) ListComplete(ctx context.Context, location string, galleryUniqueName string, sharedTo SharedToValues) (result SharedGalleryImageListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImagesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, location, galleryUniqueName, sharedTo)
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go
deleted file mode 100644
index 325d3f48b3e9..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sharedgalleryimageversions.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// SharedGalleryImageVersionsClient is the compute Client
-type SharedGalleryImageVersionsClient struct {
- BaseClient
-}
-
-// NewSharedGalleryImageVersionsClient creates an instance of the SharedGalleryImageVersionsClient client.
-func NewSharedGalleryImageVersionsClient(subscriptionID string) SharedGalleryImageVersionsClient {
- return NewSharedGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewSharedGalleryImageVersionsClientWithBaseURI creates an instance of the SharedGalleryImageVersionsClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewSharedGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) SharedGalleryImageVersionsClient {
- return SharedGalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get get a shared gallery image version by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// galleryUniqueName - the unique name of the Shared Gallery.
-// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be
-// listed.
-// galleryImageVersionName - the name of the gallery image version to be created. Needs to follow semantic
-// version name pattern: The allowed characters are digit and period. Digits must be within the range of a
-// 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
-func (client SharedGalleryImageVersionsClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string) (result SharedGalleryImageVersion, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, galleryUniqueName, galleryImageName, galleryImageVersionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client SharedGalleryImageVersionsClient) GetPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
- "galleryUniqueName": autorest.Encode("path", galleryUniqueName),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client SharedGalleryImageVersionsClient) GetResponder(resp *http.Response) (result SharedGalleryImageVersion, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List list shared gallery image versions by subscription id or tenant id.
-// Parameters:
-// location - resource location.
-// galleryUniqueName - the unique name of the Shared Gallery.
-// galleryImageName - the name of the Shared Gallery Image Definition from which the Image Versions are to be
-// listed.
-// sharedTo - the query parameter to decide what shared galleries to fetch when doing listing operations.
-func (client SharedGalleryImageVersionsClient) List(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (result SharedGalleryImageVersionListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.List")
- defer func() {
- sc := -1
- if result.sgivl.Response.Response != nil {
- sc = result.sgivl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, location, galleryUniqueName, galleryImageName, sharedTo)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.sgivl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.sgivl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.sgivl.hasNextLink() && result.sgivl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client SharedGalleryImageVersionsClient) ListPreparer(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "galleryImageName": autorest.Encode("path", galleryImageName),
- "galleryUniqueName": autorest.Encode("path", galleryUniqueName),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(sharedTo)) > 0 {
- queryParameters["sharedTo"] = autorest.Encode("query", sharedTo)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client SharedGalleryImageVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client SharedGalleryImageVersionsClient) ListResponder(resp *http.Response) (result SharedGalleryImageVersionList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client SharedGalleryImageVersionsClient) listNextResults(ctx context.Context, lastResults SharedGalleryImageVersionList) (result SharedGalleryImageVersionList, err error) {
- req, err := lastResults.sharedGalleryImageVersionListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SharedGalleryImageVersionsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SharedGalleryImageVersionsClient) ListComplete(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, sharedTo SharedToValues) (result SharedGalleryImageVersionListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SharedGalleryImageVersionsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, location, galleryUniqueName, galleryImageName, sharedTo)
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go
deleted file mode 100644
index 3590bc688f6b..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/snapshots.go
+++ /dev/null
@@ -1,778 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// SnapshotsClient is the compute Client
-type SnapshotsClient struct {
- BaseClient
-}
-
-// NewSnapshotsClient creates an instance of the SnapshotsClient client.
-func NewSnapshotsClient(subscriptionID string) SnapshotsClient {
- return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client using a custom endpoint. Use this
-// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient {
- return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate creates or updates a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-// snapshot - snapshot object supplied in the body of the Put disk operation.
-func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: snapshot,
- Constraints: []validation.Constraint{{Target: "snapshot.SnapshotProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.PurchasePlan", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.PurchasePlan.Publisher", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "snapshot.SnapshotProperties.PurchasePlan.Name", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "snapshot.SnapshotProperties.PurchasePlan.Product", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "snapshot.SnapshotProperties.CreationData", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
- {Target: "snapshot.SnapshotProperties.CreationData.GalleryImageReference", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.GalleryImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
- }},
- {Target: "snapshot.SnapshotProperties.EncryptionSettingsCollection", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.EncryptionSettingsCollection.Enabled", Name: validation.Null, Rule: true, Chain: nil}}},
- }}}}}); err != nil {
- return result, validation.NewError("compute.SnapshotsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- snapshot.ManagedBy = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
- autorest.WithJSON(snapshot),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future SnapshotsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, snapshotName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets information about a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string) (result Snapshot, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, snapshotName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GrantAccess grants access to a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-// grantAccessData - access data object supplied in the body of the get snapshot access operation.
-func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (result SnapshotsGrantAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.GrantAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: grantAccessData,
- Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.SnapshotsClient", "GrantAccess", err.Error())
- }
-
- req, err := client.GrantAccessPreparer(ctx, resourceGroupName, snapshotName, grantAccessData)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.GrantAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// GrantAccessPreparer prepares the GrantAccess request.
-func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters),
- autorest.WithJSON(grantAccessData),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GrantAccessSender sends the GrantAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future SnapshotsGrantAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// GrantAccessResponder handles the response to the GrantAccess request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists snapshots under a subscription.
-func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List")
- defer func() {
- sc := -1
- if result.sl.Response.Response != nil {
- sc = result.sl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.sl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.sl, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.sl.hasNextLink() && result.sl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client SnapshotsClient) listNextResults(ctx context.Context, lastResults SnapshotList) (result SnapshotList, err error) {
- req, err := lastResults.snapshotListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SnapshotsClient) ListComplete(ctx context.Context) (result SnapshotListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx)
- return
-}
-
-// ListByResourceGroup lists snapshots under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SnapshotListPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.sl.Response.Response != nil {
- sc = result.sl.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.sl.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.sl, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.sl.hasNextLink() && result.sl.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client SnapshotsClient) listByResourceGroupNextResults(ctx context.Context, lastResults SnapshotList) (result SnapshotList, err error) {
- req, err := lastResults.snapshotListPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SnapshotListIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// RevokeAccess revokes access to a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsRevokeAccessFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.RevokeAccess")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, snapshotName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RevokeAccessSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RevokeAccessPreparer prepares the RevokeAccess request.
-func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RevokeAccessSender sends the RevokeAccess request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future SnapshotsRevokeAccessFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update updates (patches) a snapshot.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
-// is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80
-// characters.
-// snapshot - snapshot object supplied in the body of the Patch snapshot operation.
-func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (result SnapshotsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "snapshotName": autorest.Encode("path", snapshotName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
- autorest.WithJSON(snapshot),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go
deleted file mode 100644
index 896ec1f1edbf..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/sshpublickeys.go
+++ /dev/null
@@ -1,649 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// SSHPublicKeysClient is the compute Client
-type SSHPublicKeysClient struct {
- BaseClient
-}
-
-// NewSSHPublicKeysClient creates an instance of the SSHPublicKeysClient client.
-func NewSSHPublicKeysClient(subscriptionID string) SSHPublicKeysClient {
- return NewSSHPublicKeysClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewSSHPublicKeysClientWithBaseURI creates an instance of the SSHPublicKeysClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewSSHPublicKeysClientWithBaseURI(baseURI string, subscriptionID string) SSHPublicKeysClient {
- return SSHPublicKeysClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Create creates a new SSH public key resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// SSHPublicKeyName - the name of the SSH public key.
-// parameters - parameters supplied to create the SSH public key.
-func (client SSHPublicKeysClient) Create(ctx context.Context, resourceGroupName string, SSHPublicKeyName string, parameters SSHPublicKeyResource) (result SSHPublicKeyResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.Create")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreatePreparer(ctx, resourceGroupName, SSHPublicKeyName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Create", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Create", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Create", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreatePreparer prepares the Create request.
-func (client SSHPublicKeysClient) CreatePreparer(ctx context.Context, resourceGroupName string, SSHPublicKeyName string, parameters SSHPublicKeyResource) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "sshPublicKeyName": autorest.Encode("path", SSHPublicKeyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateSender sends the Create request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) CreateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateResponder handles the response to the Create request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) CreateResponder(resp *http.Response) (result SSHPublicKeyResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete delete an SSH public key.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// SSHPublicKeyName - the name of the SSH public key.
-func (client SSHPublicKeysClient) Delete(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.Delete")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, SSHPublicKeyName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.DeleteSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Delete", resp, "Failure sending request")
- return
- }
-
- result, err = client.DeleteResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Delete", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client SSHPublicKeysClient) DeletePreparer(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "sshPublicKeyName": autorest.Encode("path", SSHPublicKeyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// GenerateKeyPair generates and returns a public/private key pair and populates the SSH public key resource with the
-// public key. The length of the key will be 3072 bits. This operation can only be performed once per SSH public key
-// resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// SSHPublicKeyName - the name of the SSH public key.
-func (client SSHPublicKeysClient) GenerateKeyPair(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (result SSHPublicKeyGenerateKeyPairResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.GenerateKeyPair")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GenerateKeyPairPreparer(ctx, resourceGroupName, SSHPublicKeyName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "GenerateKeyPair", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GenerateKeyPairSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "GenerateKeyPair", resp, "Failure sending request")
- return
- }
-
- result, err = client.GenerateKeyPairResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "GenerateKeyPair", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GenerateKeyPairPreparer prepares the GenerateKeyPair request.
-func (client SSHPublicKeysClient) GenerateKeyPairPreparer(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "sshPublicKeyName": autorest.Encode("path", SSHPublicKeyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GenerateKeyPairSender sends the GenerateKeyPair request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) GenerateKeyPairSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GenerateKeyPairResponder handles the response to the GenerateKeyPair request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) GenerateKeyPairResponder(resp *http.Response) (result SSHPublicKeyGenerateKeyPairResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Get retrieves information about an SSH public key.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// SSHPublicKeyName - the name of the SSH public key.
-func (client SSHPublicKeysClient) Get(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (result SSHPublicKeyResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, SSHPublicKeyName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client SSHPublicKeysClient) GetPreparer(ctx context.Context, resourceGroupName string, SSHPublicKeyName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "sshPublicKeyName": autorest.Encode("path", SSHPublicKeyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) GetResponder(resp *http.Response) (result SSHPublicKeyResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByResourceGroup lists all of the SSH public keys in the specified resource group. Use the nextLink property in
-// the response to get the next page of SSH public keys.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client SSHPublicKeysClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SSHPublicKeysGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.spkglr.Response.Response != nil {
- sc = result.spkglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByResourceGroupNextResults
- req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListByResourceGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.spkglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListByResourceGroup", resp, "Failure sending request")
- return
- }
-
- result.spkglr, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListByResourceGroup", resp, "Failure responding to request")
- return
- }
- if result.spkglr.hasNextLink() && result.spkglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client SSHPublicKeysClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) ListByResourceGroupResponder(resp *http.Response) (result SSHPublicKeysGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client SSHPublicKeysClient) listByResourceGroupNextResults(ctx context.Context, lastResults SSHPublicKeysGroupListResult) (result SSHPublicKeysGroupListResult, err error) {
- req, err := lastResults.sSHPublicKeysGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByResourceGroupSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByResourceGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SSHPublicKeysClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SSHPublicKeysGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.ListByResourceGroup")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
- return
-}
-
-// ListBySubscription lists all of the SSH public keys in the subscription. Use the nextLink property in the response
-// to get the next page of SSH public keys.
-func (client SSHPublicKeysClient) ListBySubscription(ctx context.Context) (result SSHPublicKeysGroupListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.spkglr.Response.Response != nil {
- sc = result.spkglr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listBySubscriptionNextResults
- req, err := client.ListBySubscriptionPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListBySubscription", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.spkglr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListBySubscription", resp, "Failure sending request")
- return
- }
-
- result.spkglr, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "ListBySubscription", resp, "Failure responding to request")
- return
- }
- if result.spkglr.hasNextLink() && result.spkglr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListBySubscriptionPreparer prepares the ListBySubscription request.
-func (client SSHPublicKeysClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) ListBySubscriptionResponder(resp *http.Response) (result SSHPublicKeysGroupListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client SSHPublicKeysClient) listBySubscriptionNextResults(ctx context.Context, lastResults SSHPublicKeysGroupListResult) (result SSHPublicKeysGroupListResult, err error) {
- req, err := lastResults.sSHPublicKeysGroupListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListBySubscriptionSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListBySubscriptionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
-func (client SSHPublicKeysClient) ListBySubscriptionComplete(ctx context.Context) (result SSHPublicKeysGroupListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.ListBySubscription")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListBySubscription(ctx)
- return
-}
-
-// Update updates a new SSH public key resource.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// SSHPublicKeyName - the name of the SSH public key.
-// parameters - parameters supplied to update the SSH public key.
-func (client SSHPublicKeysClient) Update(ctx context.Context, resourceGroupName string, SSHPublicKeyName string, parameters SSHPublicKeyUpdateResource) (result SSHPublicKeyResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/SSHPublicKeysClient.Update")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, SSHPublicKeyName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Update", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.UpdateSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Update", resp, "Failure sending request")
- return
- }
-
- result, err = client.UpdateResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.SSHPublicKeysClient", "Update", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client SSHPublicKeysClient) UpdatePreparer(ctx context.Context, resourceGroupName string, SSHPublicKeyName string, parameters SSHPublicKeyUpdateResource) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "sshPublicKeyName": autorest.Encode("path", SSHPublicKeyName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client SSHPublicKeysClient) UpdateSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client SSHPublicKeysClient) UpdateResponder(resp *http.Response) (result SSHPublicKeyResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go
deleted file mode 100644
index ab20765aebf9..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/usage.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// UsageClient is the compute Client
-type UsageClient struct {
- BaseClient
-}
-
-// NewUsageClient creates an instance of the UsageClient client.
-func NewUsageClient(subscriptionID string) UsageClient {
- return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewUsageClientWithBaseURI creates an instance of the UsageClient client using a custom endpoint. Use this when
-// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient {
- return UsageClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List gets, for the specified location, the current compute resource usage information as well as the limits for
-// compute resources under the subscription.
-// Parameters:
-// location - the location for which resource usage is queried.
-func (client UsageClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/UsageClient.List")
- defer func() {
- sc := -1
- if result.lur.Response.Response != nil {
- sc = result.lur.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.UsageClient", "List", err.Error())
- }
-
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.lur.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending request")
- return
- }
-
- result.lur, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to request")
- return
- }
- if result.lur.hasNextLink() && result.lur.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client UsageClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client UsageClient) listNextResults(ctx context.Context, lastResults ListUsagesResult) (result ListUsagesResult, err error) {
- req, err := lastResults.listUsagesResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client UsageClient) ListComplete(ctx context.Context, location string) (result ListUsagesResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/UsageClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, location)
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go
deleted file mode 100644
index 56cb9d0c28d7..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/version.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package compute
-
-import "github.com/Azure/azure-sdk-for-go/version"
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-// UserAgent returns the UserAgent string to use when sending http.Requests.
-func UserAgent() string {
- return "Azure-SDK-For-Go/" + Version() + " compute/2021-07-01"
-}
-
-// Version returns the semantic version (see http://semver.org) of the client.
-func Version() string {
- return version.Number
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go
deleted file mode 100644
index e359238e21cb..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensionimages.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineExtensionImagesClient is the compute Client
-type VirtualMachineExtensionImagesClient struct {
- BaseClient
-}
-
-// NewVirtualMachineExtensionImagesClient creates an instance of the VirtualMachineExtensionImagesClient client.
-func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient {
- return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the VirtualMachineExtensionImagesClient
-// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
-// (sovereign clouds, Azure stack).
-func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient {
- return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get gets a virtual machine extension image.
-// Parameters:
-// location - the name of a supported Azure region.
-func (client VirtualMachineExtensionImagesClient) Get(ctx context.Context, location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, publisherName, typeParameter, version)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Context, location string, publisherName string, typeParameter string, version string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "type": autorest.Encode("path", typeParameter),
- "version": autorest.Encode("path", version),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListTypes gets a list of virtual machine extension image types.
-// Parameters:
-// location - the name of a supported Azure region.
-func (client VirtualMachineExtensionImagesClient) ListTypes(ctx context.Context, location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionImagesClient.ListTypes")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListTypesPreparer(ctx, location, publisherName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListTypesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListTypesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListTypesPreparer prepares the ListTypes request.
-func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context.Context, location string, publisherName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListTypesSender sends the ListTypes request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListTypesResponder handles the response to the ListTypes request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListVersions gets a list of virtual machine extension image versions.
-// Parameters:
-// location - the name of a supported Azure region.
-// filter - the filter to apply on the operation.
-func (client VirtualMachineExtensionImagesClient) ListVersions(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionImagesClient.ListVersions")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListVersionsPreparer(ctx, location, publisherName, typeParameter, filter, top, orderby)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListVersionsSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListVersionsResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListVersionsPreparer prepares the ListVersions request.
-func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "type": autorest.Encode("path", typeParameter),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(filter) > 0 {
- queryParameters["$filter"] = autorest.Encode("query", filter)
- }
- if top != nil {
- queryParameters["$top"] = autorest.Encode("query", *top)
- }
- if len(orderby) > 0 {
- queryParameters["$orderby"] = autorest.Encode("query", orderby)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListVersionsSender sends the ListVersions request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListVersionsResponder handles the response to the ListVersions request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go
deleted file mode 100644
index a73c28d02725..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineextensions.go
+++ /dev/null
@@ -1,442 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineExtensionsClient is the compute Client
-type VirtualMachineExtensionsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineExtensionsClient creates an instance of the VirtualMachineExtensionsClient client.
-func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient {
- return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the VirtualMachineExtensionsClient client using
-// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient {
- return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the extension should be created or updated.
-// VMExtensionName - the name of the virtual machine extension.
-// extensionParameters - parameters supplied to the Create Virtual Machine Extension operation.
-func (client VirtualMachineExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtensionsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, VMExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineExtensionsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the extension should be deleted.
-// VMExtensionName - the name of the virtual machine extension.
-func (client VirtualMachineExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (result VirtualMachineExtensionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMName, VMExtensionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineExtensionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine containing the extension.
-// VMExtensionName - the name of the virtual machine extension.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMName, VMExtensionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List the operation to get all extensions of a Virtual Machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine containing the extension.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineExtensionsClient) List(ctx context.Context, resourceGroupName string, VMName string, expand string) (result VirtualMachineExtensionsListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx, resourceGroupName, VMName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineExtensionsListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Update the operation to update the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the extension should be updated.
-// VMExtensionName - the name of the virtual machine extension.
-// extensionParameters - parameters supplied to the Update Virtual Machine Extension operation.
-func (client VirtualMachineExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (result VirtualMachineExtensionsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineExtensionsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMName, VMExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineExtensionsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go
deleted file mode 100644
index 95b6c52e34af..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimages.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineImagesClient is the compute Client
-type VirtualMachineImagesClient struct {
- BaseClient
-}
-
-// NewVirtualMachineImagesClient creates an instance of the VirtualMachineImagesClient client.
-func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient {
- return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineImagesClientWithBaseURI creates an instance of the VirtualMachineImagesClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient {
- return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get gets a virtual machine image.
-// Parameters:
-// location - the name of a supported Azure region.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-// skus - a valid image SKU.
-// version - a valid image SKU version.
-func (client VirtualMachineImagesClient) Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, publisherName, offer, skus, version)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "skus": autorest.Encode("path", skus),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "version": autorest.Encode("path", version),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU.
-// Parameters:
-// location - the name of a supported Azure region.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-// skus - a valid image SKU.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx, location, publisherName, offer, skus, expand, top, orderby)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "skus": autorest.Encode("path", skus),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
- if top != nil {
- queryParameters["$top"] = autorest.Encode("query", *top)
- }
- if len(orderby) > 0 {
- queryParameters["$orderby"] = autorest.Encode("query", orderby)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListOffers gets a list of virtual machine image offers for the specified location and publisher.
-// Parameters:
-// location - the name of a supported Azure region.
-// publisherName - a valid image publisher.
-func (client VirtualMachineImagesClient) ListOffers(ctx context.Context, location string, publisherName string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.ListOffers")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListOffersPreparer(ctx, location, publisherName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListOffersSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListOffersResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListOffersPreparer prepares the ListOffers request.
-func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, location string, publisherName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListOffersSender sends the ListOffers request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListOffersResponder handles the response to the ListOffers request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListPublishers gets a list of virtual machine image publishers for the specified Azure location.
-// Parameters:
-// location - the name of a supported Azure region.
-func (client VirtualMachineImagesClient) ListPublishers(ctx context.Context, location string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.ListPublishers")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPublishersPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListPublishersSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListPublishersResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPublishersPreparer prepares the ListPublishers request.
-func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListPublishersSender sends the ListPublishers request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListPublishersResponder handles the response to the ListPublishers request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListSkus gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
-// Parameters:
-// location - the name of a supported Azure region.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-func (client VirtualMachineImagesClient) ListSkus(ctx context.Context, location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.ListSkus")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListSkusPreparer(ctx, location, publisherName, offer)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSkusSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListSkusResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListSkusPreparer prepares the ListSkus request.
-func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, location string, publisherName string, offer string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSkusSender sends the ListSkus request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListSkusResponder handles the response to the ListSkus request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go
deleted file mode 100644
index 8fb48bf680d2..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineimagesedgezone.go
+++ /dev/null
@@ -1,445 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineImagesEdgeZoneClient is the compute Client
-type VirtualMachineImagesEdgeZoneClient struct {
- BaseClient
-}
-
-// NewVirtualMachineImagesEdgeZoneClient creates an instance of the VirtualMachineImagesEdgeZoneClient client.
-func NewVirtualMachineImagesEdgeZoneClient(subscriptionID string) VirtualMachineImagesEdgeZoneClient {
- return NewVirtualMachineImagesEdgeZoneClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineImagesEdgeZoneClientWithBaseURI creates an instance of the VirtualMachineImagesEdgeZoneClient
-// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
-// (sovereign clouds, Azure stack).
-func NewVirtualMachineImagesEdgeZoneClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesEdgeZoneClient {
- return VirtualMachineImagesEdgeZoneClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Get gets a virtual machine image in an edge zone.
-// Parameters:
-// location - the name of a supported Azure region.
-// edgeZone - the name of the edge zone.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-// skus - a valid image SKU.
-// version - a valid image SKU version.
-func (client VirtualMachineImagesEdgeZoneClient) Get(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesEdgeZoneClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, location, edgeZone, publisherName, offer, skus, version)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineImagesEdgeZoneClient) GetPreparer(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "edgeZone": autorest.Encode("path", edgeZone),
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "skus": autorest.Encode("path", skus),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "version": autorest.Encode("path", version),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesEdgeZoneClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesEdgeZoneClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all virtual machine image versions for the specified location, edge zone, publisher, offer, and
-// SKU.
-// Parameters:
-// location - the name of a supported Azure region.
-// edgeZone - the name of the edge zone.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-// skus - a valid image SKU.
-// expand - the expand expression to apply on the operation.
-// top - an integer value specifying the number of images to return that matches supplied values.
-// orderby - specifies the order of the results returned. Formatted as an OData query.
-func (client VirtualMachineImagesEdgeZoneClient) List(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesEdgeZoneClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx, location, edgeZone, publisherName, offer, skus, expand, top, orderby)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineImagesEdgeZoneClient) ListPreparer(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "edgeZone": autorest.Encode("path", edgeZone),
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "skus": autorest.Encode("path", skus),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
- if top != nil {
- queryParameters["$top"] = autorest.Encode("query", *top)
- }
- if len(orderby) > 0 {
- queryParameters["$orderby"] = autorest.Encode("query", orderby)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesEdgeZoneClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesEdgeZoneClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListOffers gets a list of virtual machine image offers for the specified location, edge zone and publisher.
-// Parameters:
-// location - the name of a supported Azure region.
-// edgeZone - the name of the edge zone.
-// publisherName - a valid image publisher.
-func (client VirtualMachineImagesEdgeZoneClient) ListOffers(ctx context.Context, location string, edgeZone string, publisherName string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesEdgeZoneClient.ListOffers")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListOffersPreparer(ctx, location, edgeZone, publisherName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListOffers", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListOffersSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListOffers", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListOffersResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListOffers", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListOffersPreparer prepares the ListOffers request.
-func (client VirtualMachineImagesEdgeZoneClient) ListOffersPreparer(ctx context.Context, location string, edgeZone string, publisherName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "edgeZone": autorest.Encode("path", edgeZone),
- "location": autorest.Encode("path", location),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListOffersSender sends the ListOffers request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesEdgeZoneClient) ListOffersSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListOffersResponder handles the response to the ListOffers request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesEdgeZoneClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListPublishers gets a list of virtual machine image publishers for the specified Azure location and edge zone.
-// Parameters:
-// location - the name of a supported Azure region.
-// edgeZone - the name of the edge zone.
-func (client VirtualMachineImagesEdgeZoneClient) ListPublishers(ctx context.Context, location string, edgeZone string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesEdgeZoneClient.ListPublishers")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPublishersPreparer(ctx, location, edgeZone)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListPublishers", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListPublishersSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListPublishers", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListPublishersResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListPublishers", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPublishersPreparer prepares the ListPublishers request.
-func (client VirtualMachineImagesEdgeZoneClient) ListPublishersPreparer(ctx context.Context, location string, edgeZone string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "edgeZone": autorest.Encode("path", edgeZone),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListPublishersSender sends the ListPublishers request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesEdgeZoneClient) ListPublishersSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListPublishersResponder handles the response to the ListPublishers request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesEdgeZoneClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListSkus gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and offer.
-// Parameters:
-// location - the name of a supported Azure region.
-// edgeZone - the name of the edge zone.
-// publisherName - a valid image publisher.
-// offer - a valid image publisher offer.
-func (client VirtualMachineImagesEdgeZoneClient) ListSkus(ctx context.Context, location string, edgeZone string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesEdgeZoneClient.ListSkus")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListSkusPreparer(ctx, location, edgeZone, publisherName, offer)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListSkus", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSkusSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListSkus", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListSkusResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesEdgeZoneClient", "ListSkus", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListSkusPreparer prepares the ListSkus request.
-func (client VirtualMachineImagesEdgeZoneClient) ListSkusPreparer(ctx context.Context, location string, edgeZone string, publisherName string, offer string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "edgeZone": autorest.Encode("path", edgeZone),
- "location": autorest.Encode("path", location),
- "offer": autorest.Encode("path", offer),
- "publisherName": autorest.Encode("path", publisherName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSkusSender sends the ListSkus request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineImagesEdgeZoneClient) ListSkusSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListSkusResponder handles the response to the ListSkus request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineImagesEdgeZoneClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go
deleted file mode 100644
index 5a2f07fafb75..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachineruncommands.go
+++ /dev/null
@@ -1,689 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineRunCommandsClient is the compute Client
-type VirtualMachineRunCommandsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineRunCommandsClient creates an instance of the VirtualMachineRunCommandsClient client.
-func NewVirtualMachineRunCommandsClient(subscriptionID string) VirtualMachineRunCommandsClient {
- return NewVirtualMachineRunCommandsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineRunCommandsClientWithBaseURI creates an instance of the VirtualMachineRunCommandsClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineRunCommandsClient {
- return VirtualMachineRunCommandsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update the run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the run command should be created or updated.
-// runCommandName - the name of the virtual machine run command.
-// runCommand - parameters supplied to the Create Virtual Machine RunCommand operation.
-func (client VirtualMachineRunCommandsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, runCommand VirtualMachineRunCommand) (result VirtualMachineRunCommandsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, runCommandName, runCommand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineRunCommandsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, runCommand VirtualMachineRunCommand) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}", pathParameters),
- autorest.WithJSON(runCommand),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineRunCommandsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the run command should be deleted.
-// runCommandName - the name of the virtual machine run command.
-func (client VirtualMachineRunCommandsClient) Delete(ctx context.Context, resourceGroupName string, VMName string, runCommandName string) (result VirtualMachineRunCommandsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMName, runCommandName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineRunCommandsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMName string, runCommandName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) DeleteSender(req *http.Request) (future VirtualMachineRunCommandsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets specific run command for a subscription in a location.
-// Parameters:
-// location - the location upon which run commands is queried.
-// commandID - the command id.
-func (client VirtualMachineRunCommandsClient) Get(ctx context.Context, location string, commandID string) (result RunCommandDocument, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineRunCommandsClient", "Get", err.Error())
- }
-
- req, err := client.GetPreparer(ctx, location, commandID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, location string, commandID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "commandId": autorest.Encode("path", commandID),
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) GetResponder(resp *http.Response) (result RunCommandDocument, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetByVirtualMachine the operation to get the run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine containing the run command.
-// runCommandName - the name of the virtual machine run command.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineRunCommandsClient) GetByVirtualMachine(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, expand string) (result VirtualMachineRunCommand, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.GetByVirtualMachine")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetByVirtualMachinePreparer(ctx, resourceGroupName, VMName, runCommandName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "GetByVirtualMachine", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetByVirtualMachineSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "GetByVirtualMachine", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetByVirtualMachineResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "GetByVirtualMachine", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetByVirtualMachinePreparer prepares the GetByVirtualMachine request.
-func (client VirtualMachineRunCommandsClient) GetByVirtualMachinePreparer(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetByVirtualMachineSender sends the GetByVirtualMachine request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) GetByVirtualMachineSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetByVirtualMachineResponder handles the response to the GetByVirtualMachine request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) GetByVirtualMachineResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all available run commands for a subscription in a location.
-// Parameters:
-// location - the location upon which run commands is queried.
-func (client VirtualMachineRunCommandsClient) List(ctx context.Context, location string) (result RunCommandListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.List")
- defer func() {
- sc := -1
- if result.rclr.Response.Response != nil {
- sc = result.rclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineRunCommandsClient", "List", err.Error())
- }
-
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.rclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.rclr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.rclr.hasNextLink() && result.rclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) ListResponder(resp *http.Response) (result RunCommandListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachineRunCommandsClient) listNextResults(ctx context.Context, lastResults RunCommandListResult) (result RunCommandListResult, err error) {
- req, err := lastResults.runCommandListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineRunCommandsClient) ListComplete(ctx context.Context, location string) (result RunCommandListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, location)
- return
-}
-
-// ListByVirtualMachine the operation to get all run commands of a Virtual Machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine containing the run command.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineRunCommandsClient) ListByVirtualMachine(ctx context.Context, resourceGroupName string, VMName string, expand string) (result VirtualMachineRunCommandsListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.ListByVirtualMachine")
- defer func() {
- sc := -1
- if result.vmrclr.Response.Response != nil {
- sc = result.vmrclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listByVirtualMachineNextResults
- req, err := client.ListByVirtualMachinePreparer(ctx, resourceGroupName, VMName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "ListByVirtualMachine", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByVirtualMachineSender(req)
- if err != nil {
- result.vmrclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "ListByVirtualMachine", resp, "Failure sending request")
- return
- }
-
- result.vmrclr, err = client.ListByVirtualMachineResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "ListByVirtualMachine", resp, "Failure responding to request")
- return
- }
- if result.vmrclr.hasNextLink() && result.vmrclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByVirtualMachinePreparer prepares the ListByVirtualMachine request.
-func (client VirtualMachineRunCommandsClient) ListByVirtualMachinePreparer(ctx context.Context, resourceGroupName string, VMName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByVirtualMachineSender sends the ListByVirtualMachine request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) ListByVirtualMachineSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByVirtualMachineResponder handles the response to the ListByVirtualMachine request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) ListByVirtualMachineResponder(resp *http.Response) (result VirtualMachineRunCommandsListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByVirtualMachineNextResults retrieves the next set of results, if any.
-func (client VirtualMachineRunCommandsClient) listByVirtualMachineNextResults(ctx context.Context, lastResults VirtualMachineRunCommandsListResult) (result VirtualMachineRunCommandsListResult, err error) {
- req, err := lastResults.virtualMachineRunCommandsListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listByVirtualMachineNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByVirtualMachineSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listByVirtualMachineNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByVirtualMachineResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listByVirtualMachineNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByVirtualMachineComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineRunCommandsClient) ListByVirtualMachineComplete(ctx context.Context, resourceGroupName string, VMName string, expand string) (result VirtualMachineRunCommandsListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.ListByVirtualMachine")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByVirtualMachine(ctx, resourceGroupName, VMName, expand)
- return
-}
-
-// Update the operation to update the run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine where the run command should be updated.
-// runCommandName - the name of the virtual machine run command.
-// runCommand - parameters supplied to the Update Virtual Machine RunCommand operation.
-func (client VirtualMachineRunCommandsClient) Update(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate) (result VirtualMachineRunCommandsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineRunCommandsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMName, runCommandName, runCommand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineRunCommandsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}", pathParameters),
- autorest.WithJSON(runCommand),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineRunCommandsClient) UpdateSender(req *http.Request) (future VirtualMachineRunCommandsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineRunCommandsClient) UpdateResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go
deleted file mode 100644
index d1aff5e7573b..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachines.go
+++ /dev/null
@@ -1,2193 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachinesClient is the compute Client
-type VirtualMachinesClient struct {
- BaseClient
-}
-
-// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient client.
-func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient {
- return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachinesClientWithBaseURI creates an instance of the VirtualMachinesClient client using a custom endpoint.
-// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient {
- return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// AssessPatches assess patches on the VM.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) AssessPatches(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesAssessPatchesFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.AssessPatches")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.AssessPatchesPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "AssessPatches", nil, "Failure preparing request")
- return
- }
-
- result, err = client.AssessPatchesSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "AssessPatches", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// AssessPatchesPreparer prepares the AssessPatches request.
-func (client VirtualMachinesClient) AssessPatchesPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// AssessPatchesSender sends the AssessPatches request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) AssessPatchesSender(req *http.Request) (future VirtualMachinesAssessPatchesFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// AssessPatchesResponder handles the response to the AssessPatches request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) AssessPatchesResponder(resp *http.Response) (result VirtualMachineAssessPatchesResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Capture captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create
-// similar VMs.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// parameters - parameters supplied to the Capture Virtual Machine operation.
-func (client VirtualMachinesClient) Capture(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters) (result VirtualMachinesCaptureFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Capture")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.VhdPrefix", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.DestinationContainerName", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.OverwriteVhds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachinesClient", "Capture", err.Error())
- }
-
- req, err := client.CapturePreparer(ctx, resourceGroupName, VMName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CaptureSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CapturePreparer prepares the Capture request.
-func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CaptureSender sends the Capture request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future VirtualMachinesCaptureFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CaptureResponder handles the response to the Capture request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ConvertToManagedDisks converts virtual machine disks from blob-based to managed disks. Virtual machine must be
-// stop-deallocated before invoking this operation.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) ConvertToManagedDisks(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesConvertToManagedDisksFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ConvertToManagedDisks")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ConvertToManagedDisksPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ConvertToManagedDisksSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ConvertToManagedDisksPreparer prepares the ConvertToManagedDisks request.
-func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (future VirtualMachinesConvertToManagedDisksFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ConvertToManagedDisksResponder handles the response to the ConvertToManagedDisks request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// CreateOrUpdate the operation to create or update a virtual machine. Please note some properties can be set only
-// during virtual machine creation.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// parameters - parameters supplied to the Create Virtual Machine operation.
-func (client VirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachine) (result VirtualMachinesCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- }},
- }},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachinesClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachine) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- parameters.Resources = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachinesCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Deallocate shuts down the virtual machine and releases the compute resources. You are not billed for the compute
-// resources that this virtual machine uses.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// hibernate - optional parameter to hibernate a virtual machine. (Feature in Preview)
-func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGroupName string, VMName string, hibernate *bool) (result VirtualMachinesDeallocateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Deallocate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMName, hibernate)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeallocateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeallocatePreparer prepares the Deallocate request.
-func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMName string, hibernate *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if hibernate != nil {
- queryParameters["hibernate"] = autorest.Encode("query", *hibernate)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeallocateSender sends the Deallocate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future VirtualMachinesDeallocateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeallocateResponder handles the response to the Deallocate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Delete the operation to delete a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// forceDeletion - optional parameter to force delete virtual machines.(Feature in Preview)
-func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupName string, VMName string, forceDeletion *bool) (result VirtualMachinesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMName, forceDeletion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMName string, forceDeletion *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if forceDeletion != nil {
- queryParameters["forceDeletion"] = autorest.Encode("query", *forceDeletion)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future VirtualMachinesDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Generalize sets the OS state of the virtual machine to generalized. It is recommended to sysprep the virtual machine
-// before performing this operation. <br>For Windows, please refer to [Create a managed image of a generalized VM in
-// Azure](https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource).<br>For Linux, please refer
-// to [How to create an image of a virtual machine or
-// VHD](https://docs.microsoft.com/azure/virtual-machines/linux/capture-image).
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) Generalize(ctx context.Context, resourceGroupName string, VMName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Generalize")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GeneralizePreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GeneralizeSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request")
- return
- }
-
- result, err = client.GeneralizeResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GeneralizePreparer prepares the Generalize request.
-func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GeneralizeSender sends the Generalize request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GeneralizeResponder handles the response to the Generalize request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get retrieves information about the model view or the instance view of a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// expand - the expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime
-// properties of the virtual machine that is managed by the platform and can change outside of control plane
-// operations. 'UserData' retrieves the UserData property as part of the VM model view that was provided by the
-// user during the VM Create/Update operation.
-func (client VirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// InstallPatches installs patches on the VM.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// installPatchesInput - input for InstallPatches as directly received by the API
-func (client VirtualMachinesClient) InstallPatches(ctx context.Context, resourceGroupName string, VMName string, installPatchesInput VirtualMachineInstallPatchesParameters) (result VirtualMachinesInstallPatchesFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.InstallPatches")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.InstallPatchesPreparer(ctx, resourceGroupName, VMName, installPatchesInput)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstallPatches", nil, "Failure preparing request")
- return
- }
-
- result, err = client.InstallPatchesSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstallPatches", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// InstallPatchesPreparer prepares the InstallPatches request.
-func (client VirtualMachinesClient) InstallPatchesPreparer(ctx context.Context, resourceGroupName string, VMName string, installPatchesInput VirtualMachineInstallPatchesParameters) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches", pathParameters),
- autorest.WithJSON(installPatchesInput),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// InstallPatchesSender sends the InstallPatches request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) InstallPatchesSender(req *http.Request) (future VirtualMachinesInstallPatchesFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// InstallPatchesResponder handles the response to the InstallPatches request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) InstallPatchesResponder(resp *http.Response) (result VirtualMachineInstallPatchesResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// InstanceView retrieves information about the run-time state of a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) InstanceView(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineInstanceView, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.InstanceView")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.InstanceViewPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.InstanceViewSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure sending request")
- return
- }
-
- result, err = client.InstanceViewResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// InstanceViewPreparer prepares the InstanceView request.
-func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// InstanceViewSender sends the InstanceView request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// InstanceViewResponder handles the response to the InstanceView request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (result VirtualMachineInstanceView, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List lists all of the virtual machines in the specified resource group. Use the nextLink property in the response to
-// get the next page of virtual machines.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.List")
- defer func() {
- sc := -1
- if result.vmlr.Response.Response != nil {
- sc = result.vmlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.vmlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request")
- return
- }
-
- result.vmlr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to request")
- return
- }
- if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachinesClient) listNextResults(ctx context.Context, lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) {
- req, err := lastResults.virtualMachineListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName)
- return
-}
-
-// ListAll lists all of the virtual machines in the specified subscription. Use the nextLink property in the response
-// to get the next page of virtual machines.
-// Parameters:
-// statusOnly - statusOnly=true enables fetching run time status of all Virtual Machines in the subscription.
-func (client VirtualMachinesClient) ListAll(ctx context.Context, statusOnly string) (result VirtualMachineListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll")
- defer func() {
- sc := -1
- if result.vmlr.Response.Response != nil {
- sc = result.vmlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listAllNextResults
- req, err := client.ListAllPreparer(ctx, statusOnly)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.vmlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request")
- return
- }
-
- result.vmlr, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to request")
- return
- }
- if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListAllPreparer prepares the ListAll request.
-func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusOnly string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(statusOnly) > 0 {
- queryParameters["statusOnly"] = autorest.Encode("query", statusOnly)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAllSender sends the ListAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAllResponder handles the response to the ListAll request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listAllNextResults retrieves the next set of results, if any.
-func (client VirtualMachinesClient) listAllNextResults(ctx context.Context, lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) {
- req, err := lastResults.virtualMachineListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachinesClient) ListAllComplete(ctx context.Context, statusOnly string) (result VirtualMachineListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAll")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListAll(ctx, statusOnly)
- return
-}
-
-// ListAvailableSizes lists all available virtual machine sizes to which the specified virtual machine can be resized.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineSizeListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListAvailableSizes")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAvailableSizesSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListAvailableSizesResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListAvailableSizesPreparer prepares the ListAvailableSizes request.
-func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// ListByLocation gets all the virtual machines under the specified subscription for the specified location.
-// Parameters:
-// location - the location for which virtual machines under the subscription are queried.
-func (client VirtualMachinesClient) ListByLocation(ctx context.Context, location string) (result VirtualMachineListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListByLocation")
- defer func() {
- sc := -1
- if result.vmlr.Response.Response != nil {
- sc = result.vmlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachinesClient", "ListByLocation", err.Error())
- }
-
- result.fn = client.listByLocationNextResults
- req, err := client.ListByLocationPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByLocationSender(req)
- if err != nil {
- result.vmlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", resp, "Failure sending request")
- return
- }
-
- result.vmlr, err = client.ListByLocationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", resp, "Failure responding to request")
- return
- }
- if result.vmlr.hasNextLink() && result.vmlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByLocationPreparer prepares the ListByLocation request.
-func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByLocationSender sends the ListByLocation request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByLocationResponder handles the response to the ListByLocation request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ListByLocationResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByLocationNextResults retrieves the next set of results, if any.
-func (client VirtualMachinesClient) listByLocationNextResults(ctx context.Context, lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) {
- req, err := lastResults.virtualMachineListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByLocationSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByLocationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByLocationComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachinesClient) ListByLocationComplete(ctx context.Context, location string) (result VirtualMachineListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.ListByLocation")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByLocation(ctx, location)
- return
-}
-
-// PerformMaintenance the operation to perform maintenance on a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPerformMaintenanceFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.PerformMaintenance")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PerformMaintenanceSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PerformMaintenancePreparer prepares the PerformMaintenance request.
-func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachinesPerformMaintenanceFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// PowerOff the operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same
-// provisioned resources. You are still charged for this virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates
-// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not
-// specified
-func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (result VirtualMachinesPowerOffFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.PowerOff")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMName, skipShutdown)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PowerOffSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PowerOffPreparer prepares the PowerOff request.
-func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if skipShutdown != nil {
- queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown)
- } else {
- queryParameters["skipShutdown"] = autorest.Encode("query", false)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PowerOffSender sends the PowerOff request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future VirtualMachinesPowerOffFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PowerOffResponder handles the response to the PowerOff request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reapply the operation to reapply a virtual machine's state.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) Reapply(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesReapplyFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Reapply")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReapplyPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reapply", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReapplySender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reapply", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReapplyPreparer prepares the Reapply request.
-func (client VirtualMachinesClient) ReapplyPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReapplySender sends the Reapply request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ReapplySender(req *http.Request) (future VirtualMachinesReapplyFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReapplyResponder handles the response to the Reapply request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ReapplyResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Redeploy shuts down the virtual machine, moves it to a new node, and powers it back on.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) Redeploy(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRedeployFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Redeploy")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RedeployPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RedeploySender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RedeployPreparer prepares the Redeploy request.
-func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RedeploySender sends the Redeploy request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future VirtualMachinesRedeployFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RedeployResponder handles the response to the Redeploy request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reimage reimages the virtual machine which has an ephemeral OS disk back to its initial state.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// parameters - parameters supplied to the Reimage Virtual Machine operation.
-func (client VirtualMachinesClient) Reimage(ctx context.Context, resourceGroupName string, VMName string, parameters *VirtualMachineReimageParameters) (result VirtualMachinesReimageFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Reimage")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimagePreparer(ctx, resourceGroupName, VMName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimagePreparer prepares the Reimage request.
-func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters *VirtualMachineReimageParameters) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if parameters != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(parameters))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageSender sends the Reimage request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future VirtualMachinesReimageFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageResponder handles the response to the Reimage request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Restart the operation to restart a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) Restart(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRestartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Restart")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RestartPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RestartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RestartPreparer prepares the Restart request.
-func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestartSender sends the Restart request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) RestartSender(req *http.Request) (future VirtualMachinesRestartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RestartResponder handles the response to the Restart request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// RetrieveBootDiagnosticsData the operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// sasURIExpirationTimeInMinutes - expiration duration in minutes for the SAS URIs with a value between 1 to
-// 1440 minutes. NOTE: If not specified, SAS URIs will be generated with a default expiration duration
-// of 120 minutes.
-func (client VirtualMachinesClient) RetrieveBootDiagnosticsData(ctx context.Context, resourceGroupName string, VMName string, sasURIExpirationTimeInMinutes *int32) (result RetrieveBootDiagnosticsDataResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.RetrieveBootDiagnosticsData")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RetrieveBootDiagnosticsDataPreparer(ctx, resourceGroupName, VMName, sasURIExpirationTimeInMinutes)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RetrieveBootDiagnosticsData", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.RetrieveBootDiagnosticsDataSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RetrieveBootDiagnosticsData", resp, "Failure sending request")
- return
- }
-
- result, err = client.RetrieveBootDiagnosticsDataResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RetrieveBootDiagnosticsData", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// RetrieveBootDiagnosticsDataPreparer prepares the RetrieveBootDiagnosticsData request.
-func (client VirtualMachinesClient) RetrieveBootDiagnosticsDataPreparer(ctx context.Context, resourceGroupName string, VMName string, sasURIExpirationTimeInMinutes *int32) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if sasURIExpirationTimeInMinutes != nil {
- queryParameters["sasUriExpirationTimeInMinutes"] = autorest.Encode("query", *sasURIExpirationTimeInMinutes)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RetrieveBootDiagnosticsDataSender sends the RetrieveBootDiagnosticsData request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) RetrieveBootDiagnosticsDataSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// RetrieveBootDiagnosticsDataResponder handles the response to the RetrieveBootDiagnosticsData request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) RetrieveBootDiagnosticsDataResponder(resp *http.Response) (result RetrieveBootDiagnosticsDataResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// RunCommand run command on the VM.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// parameters - parameters supplied to the Run command operation.
-func (client VirtualMachinesClient) RunCommand(ctx context.Context, resourceGroupName string, VMName string, parameters RunCommandInput) (result VirtualMachinesRunCommandFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.RunCommand")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.CommandID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachinesClient", "RunCommand", err.Error())
- }
-
- req, err := client.RunCommandPreparer(ctx, resourceGroupName, VMName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RunCommandSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RunCommandPreparer prepares the RunCommand request.
-func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, resourceGroupName string, VMName string, parameters RunCommandInput) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RunCommandSender sends the RunCommand request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future VirtualMachinesRunCommandFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RunCommandResponder handles the response to the RunCommand request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// SimulateEviction the operation to simulate the eviction of spot virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) SimulateEviction(ctx context.Context, resourceGroupName string, VMName string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.SimulateEviction")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.SimulateEvictionPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "SimulateEviction", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.SimulateEvictionSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "SimulateEviction", resp, "Failure sending request")
- return
- }
-
- result, err = client.SimulateEvictionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "SimulateEviction", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// SimulateEvictionPreparer prepares the SimulateEviction request.
-func (client VirtualMachinesClient) SimulateEvictionPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SimulateEvictionSender sends the SimulateEviction request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) SimulateEvictionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// SimulateEvictionResponder handles the response to the SimulateEviction request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) SimulateEvictionResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Start the operation to start a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-func (client VirtualMachinesClient) Start(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesStartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Start")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartPreparer(ctx, resourceGroupName, VMName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartPreparer prepares the Start request.
-func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartSender sends the Start request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) StartSender(req *http.Request) (future VirtualMachinesStartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartResponder handles the response to the Start request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update the operation to update a virtual machine.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMName - the name of the virtual machine.
-// parameters - parameters supplied to the Update Virtual Machine operation.
-func (client VirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineUpdate) (result VirtualMachinesUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachinesClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmName": autorest.Encode("path", VMName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future VirtualMachinesUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachinesClient) UpdateResponder(resp *http.Response) (result VirtualMachine, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go
deleted file mode 100644
index 9e71f3ba9100..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetextensions.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetExtensionsClient is the compute Client
-type VirtualMachineScaleSetExtensionsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetExtensionsClient creates an instance of the VirtualMachineScaleSetExtensionsClient client.
-func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string) VirtualMachineScaleSetExtensionsClient {
- return NewVirtualMachineScaleSetExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetExtensionsClientWithBaseURI creates an instance of the
-// VirtualMachineScaleSetExtensionsClient client using a custom endpoint. Use this when interacting with an Azure
-// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetExtensionsClient {
- return VirtualMachineScaleSetExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update an extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set where the extension should be create or updated.
-// vmssExtensionName - the name of the VM scale set extension.
-// extensionParameters - parameters supplied to the Create VM scale set Extension operation.
-func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension) (result VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- "vmssExtensionName": autorest.Encode("path", vmssExtensionName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- extensionParameters.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set where the extension should be deleted.
-// vmssExtensionName - the name of the VM scale set extension.
-func (client VirtualMachineScaleSetExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (result VirtualMachineScaleSetExtensionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- "vmssExtensionName": autorest.Encode("path", vmssExtensionName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetExtensionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set containing the extension.
-// vmssExtensionName - the name of the VM scale set extension.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineScaleSetExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result VirtualMachineScaleSetExtension, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- "vmssExtensionName": autorest.Encode("path", vmssExtensionName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all extensions in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set containing the extension.
-func (client VirtualMachineScaleSetExtensionsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.List")
- defer func() {
- sc := -1
- if result.vmsselr.Response.Response != nil {
- sc = result.vmsselr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.vmsselr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.vmsselr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.vmsselr.hasNextLink() && result.vmsselr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetExtensionListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetExtensionsClient) listNextResults(ctx context.Context, lastResults VirtualMachineScaleSetExtensionListResult) (result VirtualMachineScaleSetExtensionListResult, err error) {
- req, err := lastResults.virtualMachineScaleSetExtensionListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetExtensionsClient) ListComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName, VMScaleSetName)
- return
-}
-
-// Update the operation to update an extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set where the extension should be updated.
-// vmssExtensionName - the name of the VM scale set extension.
-// extensionParameters - parameters supplied to the Update VM scale set Extension operation.
-func (client VirtualMachineScaleSetExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate) (result VirtualMachineScaleSetExtensionsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineScaleSetExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- "vmssExtensionName": autorest.Encode("path", vmssExtensionName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- extensionParameters.Name = nil
- extensionParameters.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go
deleted file mode 100644
index be9de5194bb5..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetrollingupgrades.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetRollingUpgradesClient is the compute Client
-type VirtualMachineScaleSetRollingUpgradesClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetRollingUpgradesClient creates an instance of the
-// VirtualMachineScaleSetRollingUpgradesClient client.
-func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient {
- return NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI creates an instance of the
-// VirtualMachineScaleSetRollingUpgradesClient client using a custom endpoint. Use this when interacting with an Azure
-// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient {
- return VirtualMachineScaleSetRollingUpgradesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Cancel cancels the current virtual machine scale set rolling upgrade.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.Cancel")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CancelPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CancelSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CancelPreparer prepares the Cancel request.
-func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CancelSender sends the Cancel request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CancelResponder handles the response to the Cancel request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetRollingUpgradesClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// GetLatest gets the status of the latest virtual machine scale set rolling upgrade.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatest(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result RollingUpgradeStatusInfo, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.GetLatest")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetLatestPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetLatestSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetLatestResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetLatestPreparer prepares the GetLatest request.
-func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetLatestSender sends the GetLatest request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetLatestResponder handles the response to the GetLatest request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(resp *http.Response) (result RollingUpgradeStatusInfo, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// StartExtensionUpgrade starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
-// the latest available extension version. Instances which are already running the latest extension versions are not
-// affected.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.StartExtensionUpgrade")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartExtensionUpgradePreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartExtensionUpgradeSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartExtensionUpgradePreparer prepares the StartExtensionUpgrade request.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartExtensionUpgradeSender sends the StartExtensionUpgrade request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartExtensionUpgradeResponder handles the response to the StartExtensionUpgrade request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available
-// Platform Image OS version. Instances which are already running the latest available OS version are not affected.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetRollingUpgradesClient.StartOSUpgrade")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartOSUpgradePreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartOSUpgradeSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartOSUpgradePreparer prepares the StartOSUpgrade request.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartOSUpgradeResponder handles the response to the StartOSUpgrade request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go
deleted file mode 100644
index 28523a860773..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesets.go
+++ /dev/null
@@ -1,2154 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetsClient is the compute Client
-type VirtualMachineScaleSetsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetsClient creates an instance of the VirtualMachineScaleSetsClient client.
-func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient {
- return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the VirtualMachineScaleSetsClient client using a
-// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
-// Azure stack).
-func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient {
- return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// ConvertToSinglePlacementGroup converts SinglePlacementGroup property to false for a existing virtual machine scale
-// set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the virtual machine scale set to create or update.
-// parameters - the input object for ConvertToSinglePlacementGroup API.
-func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroup(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ConvertToSinglePlacementGroupPreparer(ctx, resourceGroupName, VMScaleSetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ConvertToSinglePlacementGroupSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", resp, "Failure sending request")
- return
- }
-
- result, err = client.ConvertToSinglePlacementGroupResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ConvertToSinglePlacementGroupPreparer prepares the ConvertToSinglePlacementGroup request.
-func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ConvertToSinglePlacementGroupSender sends the ConvertToSinglePlacementGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ConvertToSinglePlacementGroupResponder handles the response to the ConvertToSinglePlacementGroup request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// CreateOrUpdate create or update a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set to create or update.
-// parameters - the scale set object.
-func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet) (result VirtualMachineScaleSetsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
- {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(5), Chain: nil},
- }},
- {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
- {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(5), Chain: nil},
- }},
- {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
- {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil},
- }},
- }},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", err.Error())
- }
-
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Deallocate deallocates specific virtual machines in a VM scale set. Shuts down the virtual machines and releases the
-// compute resources. You are not billed for the compute resources that this virtual machine scale set deallocates.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsDeallocateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Deallocate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeallocateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeallocatePreparer prepares the Deallocate request.
-func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeallocateSender sends the Deallocate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetsDeallocateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeallocateResponder handles the response to the Deallocate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Delete deletes a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// forceDeletion - optional parameter to force delete a VM scale set. (Feature in Preview)
-func (client VirtualMachineScaleSetsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, forceDeletion *bool) (result VirtualMachineScaleSetsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, forceDeletion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, forceDeletion *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if forceDeletion != nil {
- queryParameters["forceDeletion"] = autorest.Encode("query", *forceDeletion)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// DeleteInstances deletes virtual machines in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-// forceDeletion - optional parameter to force delete virtual machines from the VM scale set. (Feature in
-// Preview)
-func (client VirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, forceDeletion *bool) (result VirtualMachineScaleSetsDeleteInstancesFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.DeleteInstances")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: VMInstanceIDs,
- Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetsClient", "DeleteInstances", err.Error())
- }
-
- req, err := client.DeleteInstancesPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs, forceDeletion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteInstancesSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeleteInstancesPreparer prepares the DeleteInstances request.
-func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, forceDeletion *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if forceDeletion != nil {
- queryParameters["forceDeletion"] = autorest.Encode("query", *forceDeletion)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters),
- autorest.WithJSON(VMInstanceIDs),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteInstancesSender sends the DeleteInstances request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (future VirtualMachineScaleSetsDeleteInstancesFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// ForceRecoveryServiceFabricPlatformUpdateDomainWalk manual platform update domain walk to update virtual machines in
-// a service fabric virtual machine scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// platformUpdateDomain - the platform update domain for which a manual recovery walk is requested
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result RecoveryWalkResponse, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx, resourceGroupName, VMScaleSetName, platformUpdateDomain)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", resp, "Failure sending request")
- return
- }
-
- result, err = client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer prepares the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request.
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain),
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/forceRecoveryServiceFabricPlatformUpdateDomainWalk", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender sends the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder handles the response to the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder(resp *http.Response) (result RecoveryWalkResponse, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Get display information about a virtual machine scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// expand - the expand expression to apply on the operation. 'UserData' retrieves the UserData property of the
-// VM scale set that was provided by the user during the VM scale set Create/Update operation
-func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, expand ExpandTypesForGetVMScaleSets) (result VirtualMachineScaleSet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, expand ExpandTypesForGetVMScaleSets) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetInstanceView gets the status of a VM scale set instance.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.GetInstanceView")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetInstanceViewSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetInstanceViewResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetInstanceViewPreparer prepares the GetInstanceView request.
-func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetInstanceViewSender sends the GetInstanceView request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetOSUpgradeHistory gets list of OS upgrades on a VM scale set instance.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistory(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListOSUpgradeHistoryPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.GetOSUpgradeHistory")
- defer func() {
- sc := -1
- if result.vmsslouh.Response.Response != nil {
- sc = result.vmsslouh.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.getOSUpgradeHistoryNextResults
- req, err := client.GetOSUpgradeHistoryPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetOSUpgradeHistorySender(req)
- if err != nil {
- result.vmsslouh.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", resp, "Failure sending request")
- return
- }
-
- result.vmsslouh, err = client.GetOSUpgradeHistoryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", resp, "Failure responding to request")
- return
- }
- if result.vmsslouh.hasNextLink() && result.vmsslouh.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// GetOSUpgradeHistoryPreparer prepares the GetOSUpgradeHistory request.
-func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osUpgradeHistory", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetOSUpgradeHistorySender sends the GetOSUpgradeHistory request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistorySender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetOSUpgradeHistoryResponder handles the response to the GetOSUpgradeHistory request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryResponder(resp *http.Response) (result VirtualMachineScaleSetListOSUpgradeHistory, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// getOSUpgradeHistoryNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetsClient) getOSUpgradeHistoryNextResults(ctx context.Context, lastResults VirtualMachineScaleSetListOSUpgradeHistory) (result VirtualMachineScaleSetListOSUpgradeHistory, err error) {
- req, err := lastResults.virtualMachineScaleSetListOSUpgradeHistoryPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.GetOSUpgradeHistorySender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", resp, "Failure sending next results request")
- }
- result, err = client.GetOSUpgradeHistoryResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// GetOSUpgradeHistoryComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListOSUpgradeHistoryIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.GetOSUpgradeHistory")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.GetOSUpgradeHistory(ctx, resourceGroupName, VMScaleSetName)
- return
-}
-
-// List gets a list of all VM scale sets under a resource group.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-func (client VirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineScaleSetListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.List")
- defer func() {
- sc := -1
- if result.vmsslr.Response.Response != nil {
- sc = result.vmsslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.vmsslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.vmsslr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.vmsslr.hasNextLink() && result.vmsslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetsClient) listNextResults(ctx context.Context, lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) {
- req, err := lastResults.virtualMachineScaleSetListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetsClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualMachineScaleSetListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName)
- return
-}
-
-// ListAll gets a list of all VM Scale Sets in the subscription, regardless of the associated resource group. Use
-// nextLink property in the response to get the next page of VM Scale Sets. Do this till nextLink is null to fetch all
-// the VM Scale Sets.
-func (client VirtualMachineScaleSetsClient) ListAll(ctx context.Context) (result VirtualMachineScaleSetListWithLinkResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListAll")
- defer func() {
- sc := -1
- if result.vmsslwlr.Response.Response != nil {
- sc = result.vmsslwlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listAllNextResults
- req, err := client.ListAllPreparer(ctx)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.vmsslwlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request")
- return
- }
-
- result.vmsslwlr, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to request")
- return
- }
- if result.vmsslwlr.hasNextLink() && result.vmsslwlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListAllPreparer prepares the ListAll request.
-func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAllSender sends the ListAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAllResponder handles the response to the ListAll request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listAllNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetsClient) listAllNextResults(ctx context.Context, lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, err error) {
- req, err := lastResults.virtualMachineScaleSetListWithLinkResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListAllSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListAllResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetsClient) ListAllComplete(ctx context.Context) (result VirtualMachineScaleSetListWithLinkResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListAll")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListAll(ctx)
- return
-}
-
-// ListByLocation gets all the VM scale sets under the specified subscription for the specified location.
-// Parameters:
-// location - the location for which VM scale sets under the subscription are queried.
-func (client VirtualMachineScaleSetsClient) ListByLocation(ctx context.Context, location string) (result VirtualMachineScaleSetListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListByLocation")
- defer func() {
- sc := -1
- if result.vmsslr.Response.Response != nil {
- sc = result.vmsslr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetsClient", "ListByLocation", err.Error())
- }
-
- result.fn = client.listByLocationNextResults
- req, err := client.ListByLocationPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListByLocation", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListByLocationSender(req)
- if err != nil {
- result.vmsslr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListByLocation", resp, "Failure sending request")
- return
- }
-
- result.vmsslr, err = client.ListByLocationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListByLocation", resp, "Failure responding to request")
- return
- }
- if result.vmsslr.hasNextLink() && result.vmsslr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListByLocationPreparer prepares the ListByLocation request.
-func (client VirtualMachineScaleSetsClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachineScaleSets", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByLocationSender sends the ListByLocation request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ListByLocationSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByLocationResponder handles the response to the ListByLocation request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ListByLocationResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listByLocationNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetsClient) listByLocationNextResults(ctx context.Context, lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) {
- req, err := lastResults.virtualMachineScaleSetListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listByLocationNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListByLocationSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listByLocationNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListByLocationResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listByLocationNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListByLocationComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetsClient) ListByLocationComplete(ctx context.Context, location string) (result VirtualMachineScaleSetListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListByLocation")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListByLocation(ctx, location)
- return
-}
-
-// ListSkus gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances allowed
-// for each SKU.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-func (client VirtualMachineScaleSetsClient) ListSkus(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListSkus")
- defer func() {
- sc := -1
- if result.vmsslsr.Response.Response != nil {
- sc = result.vmsslsr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listSkusNextResults
- req, err := client.ListSkusPreparer(ctx, resourceGroupName, VMScaleSetName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSkusSender(req)
- if err != nil {
- result.vmsslsr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request")
- return
- }
-
- result.vmsslsr, err = client.ListSkusResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to request")
- return
- }
- if result.vmsslsr.hasNextLink() && result.vmsslsr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListSkusPreparer prepares the ListSkus request.
-func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSkusSender sends the ListSkus request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListSkusResponder handles the response to the ListSkus request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listSkusNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetsClient) listSkusNextResults(ctx context.Context, lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, err error) {
- req, err := lastResults.virtualMachineScaleSetListSkusResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSkusSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListSkusResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListSkusComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetsClient) ListSkusComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ListSkus")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.ListSkus(ctx, resourceGroupName, VMScaleSetName)
- return
-}
-
-// PerformMaintenance perform maintenance on one or more virtual machines in a VM scale set. Operation on instances
-// which are not eligible for perform maintenance will be failed. Please refer to best practices for more details:
-// https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsPerformMaintenanceFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.PerformMaintenance")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PerformMaintenanceSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PerformMaintenancePreparer prepares the PerformMaintenance request.
-func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/performMaintenance", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetsPerformMaintenanceFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// PowerOff power off (stop) one or more virtual machines in a VM scale set. Note that resources are still attached and
-// you are getting charged for the resources. Instead, use deallocate to release resources and avoid charges.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates
-// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not
-// specified
-func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (result VirtualMachineScaleSetsPowerOffFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.PowerOff")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs, skipShutdown)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PowerOffSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PowerOffPreparer prepares the PowerOff request.
-func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if skipShutdown != nil {
- queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown)
- } else {
- queryParameters["skipShutdown"] = autorest.Encode("query", false)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PowerOffSender sends the PowerOff request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetsPowerOffFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PowerOffResponder handles the response to the PowerOff request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Redeploy shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers
-// them back on.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsRedeployFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Redeploy")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RedeployPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RedeploySender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RedeployPreparer prepares the Redeploy request.
-func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/redeploy", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RedeploySender sends the Redeploy request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetsRedeployFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RedeployResponder handles the response to the Redeploy request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reimage reimages (upgrade the operating system) one or more virtual machines in a VM scale set which don't have a
-// ephemeral OS disk, for virtual machines who have a ephemeral OS disk the virtual machine is reset to initial state.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMScaleSetReimageInput - parameters for Reimaging VM ScaleSet.
-func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *VirtualMachineScaleSetReimageParameters) (result VirtualMachineScaleSetsReimageFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Reimage")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, VMScaleSetReimageInput)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimagePreparer prepares the Reimage request.
-func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *VirtualMachineScaleSetReimageParameters) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMScaleSetReimageInput != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMScaleSetReimageInput))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageSender sends the Reimage request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetsReimageFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageResponder handles the response to the Reimage request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// ReimageAll reimages all the disks ( including data disks ) in the virtual machines in a VM scale set. This operation
-// is only supported for managed disks.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageAllFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.ReimageAll")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageAllSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimageAllPreparer prepares the ReimageAll request.
-func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimageall", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageAllSender sends the ReimageAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetsReimageAllFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageAllResponder handles the response to the ReimageAll request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Restart restarts one or more virtual machines in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsRestartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Restart")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RestartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RestartPreparer prepares the Restart request.
-func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestartSender sends the Restart request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetsRestartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RestartResponder handles the response to the Restart request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// SetOrchestrationServiceState changes ServiceState property for a given service
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the virtual machine scale set to create or update.
-// parameters - the input object for SetOrchestrationServiceState API.
-func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceState(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters OrchestrationServiceStateInput) (result VirtualMachineScaleSetsSetOrchestrationServiceStateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.SetOrchestrationServiceState")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.SetOrchestrationServiceStatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "SetOrchestrationServiceState", nil, "Failure preparing request")
- return
- }
-
- result, err = client.SetOrchestrationServiceStateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "SetOrchestrationServiceState", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// SetOrchestrationServiceStatePreparer prepares the SetOrchestrationServiceState request.
-func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters OrchestrationServiceStateInput) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/setOrchestrationServiceState", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SetOrchestrationServiceStateSender sends the SetOrchestrationServiceState request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStateSender(req *http.Request) (future VirtualMachineScaleSetsSetOrchestrationServiceStateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// SetOrchestrationServiceStateResponder handles the response to the SetOrchestrationServiceState request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) SetOrchestrationServiceStateResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Start starts one or more virtual machines in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsStartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Start")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartPreparer prepares the Start request.
-func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMInstanceIDs != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMInstanceIDs))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartSender sends the Start request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetsStartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartResponder handles the response to the Start request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update update a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set to create or update.
-// parameters - the scale set object.
-func (client VirtualMachineScaleSetsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate) (result VirtualMachineScaleSetsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// UpdateInstances upgrades one or more virtual machines to the latest SKU set in the VM scale set model.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
-func (client VirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result VirtualMachineScaleSetsUpdateInstancesFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetsClient.UpdateInstances")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: VMInstanceIDs,
- Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetsClient", "UpdateInstances", err.Error())
- }
-
- req, err := client.UpdateInstancesPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateInstancesSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdateInstancesPreparer prepares the UpdateInstances request.
-func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters),
- autorest.WithJSON(VMInstanceIDs),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateInstancesSender sends the UpdateInstances request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (future VirtualMachineScaleSetsUpdateInstancesFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateInstancesResponder handles the response to the UpdateInstances request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
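The generated VirtualMachineScaleSetsClient removed above is a classic autorest "track 1" client: each operation is split into Preparer/Sender/Responder steps, list operations return a page type that follows nextLink, and long-running operations return a future. The snippet below is a minimal, illustrative sketch of how such a client was typically consumed before this removal; it is not part of this change, the subscription ID and resource group are placeholders, and environment-based authentication is only one of several options.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder identifiers, for illustration only.
	const subscriptionID = "00000000-0000-0000-0000-000000000000"
	const resourceGroup = "example-rg"

	client := compute.NewVirtualMachineScaleSetsClient(subscriptionID)

	// One possible way to authenticate; any autorest.Authorizer works.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// List returns the first page; NextWithContext follows the nextLink
	// until every page has been consumed.
	page, err := client.List(ctx, resourceGroup)
	if err != nil {
		panic(err)
	}
	for page.NotDone() {
		for _, vmss := range page.Values() {
			if vmss.Name != nil {
				fmt.Println(*vmss.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
}

Long-running calls such as PowerOff, Reimage, or Update follow the same shape but return a future; callers typically invoke WaitForCompletionRef on the future with client.Client before reading its result.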
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go
deleted file mode 100644
index 9713046e25d8..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmextensions.go
+++ /dev/null
@@ -1,457 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetVMExtensionsClient is the compute Client
-type VirtualMachineScaleSetVMExtensionsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetVMExtensionsClient creates an instance of the VirtualMachineScaleSetVMExtensionsClient
-// client.
-func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string) VirtualMachineScaleSetVMExtensionsClient {
- return NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI creates an instance of the
-// VirtualMachineScaleSetVMExtensionsClient client using a custom endpoint. Use this when interacting with an Azure
-// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMExtensionsClient {
- return VirtualMachineScaleSetVMExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update the VMSS VM extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// VMExtensionName - the name of the virtual machine extension.
-// extensionParameters - parameters supplied to the Create Virtual Machine Extension operation.
-func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension) (result VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- extensionParameters.Name = nil
- extensionParameters.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSetVMExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the VMSS VM extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// VMExtensionName - the name of the virtual machine extension.
-func (client VirtualMachineScaleSetVMExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string) (result VirtualMachineScaleSetVMExtensionsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineScaleSetVMExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the VMSS VM extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// VMExtensionName - the name of the virtual machine extension.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineScaleSetVMExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, expand string) (result VirtualMachineScaleSetVMExtension, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetVMExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMExtensionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVMExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List the operation to get all extensions of an instance in Virtual Machine Scaleset.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineScaleSetVMExtensionsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (result VirtualMachineScaleSetVMExtensionsListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineScaleSetVMExtensionsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMExtensionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMExtensionsListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Update the operation to update the VMSS VM extension.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// VMExtensionName - the name of the virtual machine extension.
-// extensionParameters - parameters supplied to the Update Virtual Machine Extension operation.
-func (client VirtualMachineScaleSetVMExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate) (result VirtualMachineScaleSetVMExtensionsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMExtensionsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMExtensionName, extensionParameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMExtensionsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineScaleSetVMExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmExtensionName": autorest.Encode("path", VMExtensionName),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- extensionParameters.Name = nil
- extensionParameters.Type = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}", pathParameters),
- autorest.WithJSON(extensionParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetVMExtension, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go
deleted file mode 100644
index 4fbb0c104507..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvmruncommands.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetVMRunCommandsClient is the compute Client
-type VirtualMachineScaleSetVMRunCommandsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetVMRunCommandsClient creates an instance of the VirtualMachineScaleSetVMRunCommandsClient
-// client.
-func NewVirtualMachineScaleSetVMRunCommandsClient(subscriptionID string) VirtualMachineScaleSetVMRunCommandsClient {
- return NewVirtualMachineScaleSetVMRunCommandsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetVMRunCommandsClientWithBaseURI creates an instance of the
-// VirtualMachineScaleSetVMRunCommandsClient client using a custom endpoint. Use this when interacting with an Azure
-// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewVirtualMachineScaleSetVMRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMRunCommandsClient {
- return VirtualMachineScaleSetVMRunCommandsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// CreateOrUpdate the operation to create or update the VMSS VM run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// runCommandName - the name of the virtual machine run command.
-// runCommand - parameters supplied to the Create Virtual Machine RunCommand operation.
-func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand) (result VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.CreateOrUpdate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, runCommandName, runCommand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "CreateOrUpdate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.CreateOrUpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}", pathParameters),
- autorest.WithJSON(runCommand),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetVMRunCommandsCreateOrUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMRunCommandsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete the operation to delete the VMSS VM run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// runCommandName - the name of the virtual machine run command.
-func (client VirtualMachineScaleSetVMRunCommandsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string) (result VirtualMachineScaleSetVMRunCommandsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, runCommandName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineScaleSetVMRunCommandsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMRunCommandsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMRunCommandsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMRunCommandsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get the operation to get the VMSS VM run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// runCommandName - the name of the virtual machine run command.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineScaleSetVMRunCommandsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, expand string) (result VirtualMachineRunCommand, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, runCommandName, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetVMRunCommandsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMRunCommandsClient) GetResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List the operation to get all run commands of an instance in Virtual Machine Scaleset.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// expand - the expand expression to apply on the operation.
-func (client VirtualMachineScaleSetVMRunCommandsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (result VirtualMachineRunCommandsListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.List")
- defer func() {
- sc := -1
- if result.vmrclr.Response.Response != nil {
- sc = result.vmrclr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.vmrclr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.vmrclr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.vmrclr.hasNextLink() && result.vmrclr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineScaleSetVMRunCommandsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMRunCommandsClient) ListResponder(resp *http.Response) (result VirtualMachineRunCommandsListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetVMRunCommandsClient) listNextResults(ctx context.Context, lastResults VirtualMachineRunCommandsListResult) (result VirtualMachineRunCommandsListResult, err error) {
- req, err := lastResults.virtualMachineRunCommandsListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetVMRunCommandsClient) ListComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand string) (result VirtualMachineRunCommandsListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
- return
-}
-
-// Update the operation to update the VMSS VM run command.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// runCommandName - the name of the virtual machine run command.
-// runCommand - parameters supplied to the Update Virtual Machine RunCommand operation.
-func (client VirtualMachineScaleSetVMRunCommandsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate) (result VirtualMachineScaleSetVMRunCommandsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMRunCommandsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, runCommandName, runCommand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMRunCommandsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineScaleSetVMRunCommandsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "runCommandName": autorest.Encode("path", runCommandName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPatch(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}", pathParameters),
- autorest.WithJSON(runCommand),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMRunCommandsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMRunCommandsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMRunCommandsClient) UpdateResponder(resp *http.Response) (result VirtualMachineRunCommand, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go
deleted file mode 100644
index f46f6c4f52f1..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinescalesetvms.go
+++ /dev/null
@@ -1,1430 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineScaleSetVMsClient is the compute Client
-type VirtualMachineScaleSetVMsClient struct {
- BaseClient
-}
-
-// NewVirtualMachineScaleSetVMsClient creates an instance of the VirtualMachineScaleSetVMsClient client.
-func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient {
- return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the VirtualMachineScaleSetVMsClient client
-// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
-// clouds, Azure stack).
-func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient {
- return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases the
-// compute resources it uses. You are not billed for the compute resources of this virtual machine once it is
-// deallocated.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsDeallocateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Deallocate")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeallocateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeallocatePreparer prepares the Deallocate request.
-func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeallocateSender sends the Deallocate request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetVMsDeallocateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeallocateResponder handles the response to the Deallocate request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Delete deletes a virtual machine from a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// forceDeletion - optional parameter to force delete a virtual machine from a VM scale set. (Feature in
-// Preview)
-func (client VirtualMachineScaleSetVMsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, forceDeletion *bool) (result VirtualMachineScaleSetVMsDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, forceDeletion)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, forceDeletion *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if forceDeletion != nil {
- queryParameters["forceDeletion"] = autorest.Encode("query", *forceDeletion)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMsDeleteFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Get gets a virtual machine from a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// expand - the expand expression to apply on the operation. 'InstanceView' will retrieve the instance view of
-// the virtual machine. 'UserData' will retrieve the UserData of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (result VirtualMachineScaleSetVM, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Get")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(string(expand)) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// GetInstanceView gets the status of a virtual machine from a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.GetInstanceView")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.GetInstanceViewSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request")
- return
- }
-
- result, err = client.GetInstanceViewResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// GetInstanceViewPreparer prepares the GetInstanceView request.
-func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetInstanceViewSender sends the GetInstanceView request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// List gets a list of all virtual machines in a VM scale sets.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// virtualMachineScaleSetName - the name of the VM scale set.
-// filter - the filter to apply to the operation. Allowed values are 'startswith(instanceView/statuses/code,
-// 'PowerState') eq true', 'properties/latestModelApplied eq true', 'properties/latestModelApplied eq false'.
-// selectParameter - the list parameters. Allowed values are 'instanceView', 'instanceView/statuses'.
-// expand - the expand expression to apply to the operation. Allowed values are 'instanceView'.
-func (client VirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultPage, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.List")
- defer func() {
- sc := -1
- if result.vmssvlr.Response.Response != nil {
- sc = result.vmssvlr.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.vmssvlr.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request")
- return
- }
-
- result.vmssvlr, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to request")
- return
- }
- if result.vmssvlr.hasNextLink() && result.vmssvlr.IsEmpty() {
- err = result.NextWithContext(ctx)
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(filter) > 0 {
- queryParameters["$filter"] = autorest.Encode("query", filter)
- }
- if len(selectParameter) > 0 {
- queryParameters["$select"] = autorest.Encode("query", selectParameter)
- }
- if len(expand) > 0 {
- queryParameters["$expand"] = autorest.Encode("query", expand)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client VirtualMachineScaleSetVMsClient) listNextResults(ctx context.Context, lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, err error) {
- req, err := lastResults.virtualMachineScaleSetVMListResultPreparer(ctx)
- if err != nil {
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", nil, "Failure preparing next results request")
- }
- if req == nil {
- return
- }
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", resp, "Failure sending next results request")
- }
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", resp, "Failure responding to next results request")
- }
- return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client VirtualMachineScaleSetVMsClient) ListComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultIterator, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.List")
- defer func() {
- sc := -1
- if result.Response().Response.Response != nil {
- sc = result.page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- result.page, err = client.List(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
- return
-}
-
-// PerformMaintenance performs maintenance on a virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.PerformMaintenance")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PerformMaintenanceSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PerformMaintenancePreparer prepares the PerformMaintenance request.
-func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/performMaintenance", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// PowerOff power off (stop) a virtual machine in a VM scale set. Note that resources are still attached and you are
-// getting charged for the resources. Instead, use deallocate to release resources and avoid charges.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates
-// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not
-// specified
-func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (result VirtualMachineScaleSetVMsPowerOffFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.PowerOff")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, skipShutdown)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request")
- return
- }
-
- result, err = client.PowerOffSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// PowerOffPreparer prepares the PowerOff request.
-func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if skipShutdown != nil {
- queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown)
- } else {
- queryParameters["skipShutdown"] = autorest.Encode("query", false)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// PowerOffSender sends the PowerOff request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetVMsPowerOffFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// PowerOffResponder handles the response to the PowerOff request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Redeploy shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it back
-// on.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsRedeployFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Redeploy")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RedeployPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RedeploySender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RedeployPreparer prepares the Redeploy request.
-func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/redeploy", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RedeploySender sends the Redeploy request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetVMsRedeployFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RedeployResponder handles the response to the Redeploy request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Reimage reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// VMScaleSetVMReimageInput - parameters for the Reimaging Virtual machine in ScaleSet.
-func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *VirtualMachineScaleSetVMReimageParameters) (result VirtualMachineScaleSetVMsReimageFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Reimage")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMScaleSetVMReimageInput)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimagePreparer prepares the Reimage request.
-func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *VirtualMachineScaleSetVMReimageParameters) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- if VMScaleSetVMReimageInput != nil {
- preparer = autorest.DecoratePreparer(preparer,
- autorest.WithJSON(VMScaleSetVMReimageInput))
- }
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageSender sends the Reimage request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageResponder handles the response to the Reimage request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// ReimageAll allows you to re-image all the disks ( including data disks ) in the a VM scale set instance. This
-// operation is only supported for managed disks.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageAllFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.ReimageAll")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", nil, "Failure preparing request")
- return
- }
-
- result, err = client.ReimageAllSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// ReimageAllPreparer prepares the ReimageAll request.
-func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ReimageAllSender sends the ReimageAll request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageAllFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// ReimageAllResponder handles the response to the ReimageAll request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Restart restarts a virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsRestartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Restart")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RestartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RestartPreparer prepares the Restart request.
-func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestartSender sends the Restart request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetVMsRestartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RestartResponder handles the response to the Restart request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// RetrieveBootDiagnosticsData the operation to retrieve SAS URIs of boot diagnostic logs for a virtual machine in a VM
-// scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// sasURIExpirationTimeInMinutes - expiration duration in minutes for the SAS URIs with a value between 1 to
-// 1440 minutes. NOTE: If not specified, SAS URIs will be generated with a default expiration duration
-// of 120 minutes.
-func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsData(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, sasURIExpirationTimeInMinutes *int32) (result RetrieveBootDiagnosticsDataResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.RetrieveBootDiagnosticsDataPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, sasURIExpirationTimeInMinutes)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RetrieveBootDiagnosticsData", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.RetrieveBootDiagnosticsDataSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RetrieveBootDiagnosticsData", resp, "Failure sending request")
- return
- }
-
- result, err = client.RetrieveBootDiagnosticsDataResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RetrieveBootDiagnosticsData", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// RetrieveBootDiagnosticsDataPreparer prepares the RetrieveBootDiagnosticsData request.
-func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsDataPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, sasURIExpirationTimeInMinutes *int32) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if sasURIExpirationTimeInMinutes != nil {
- queryParameters["sasUriExpirationTimeInMinutes"] = autorest.Encode("query", *sasURIExpirationTimeInMinutes)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/retrieveBootDiagnosticsData", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RetrieveBootDiagnosticsDataSender sends the RetrieveBootDiagnosticsData request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsDataSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// RetrieveBootDiagnosticsDataResponder handles the response to the RetrieveBootDiagnosticsData request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsDataResponder(resp *http.Response) (result RetrieveBootDiagnosticsDataResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// RunCommand run command on a virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-// parameters - parameters supplied to the Run command operation.
-func (client VirtualMachineScaleSetVMsClient) RunCommand(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters RunCommandInput) (result VirtualMachineScaleSetVMsRunCommandFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.RunCommand")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.CommandID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetVMsClient", "RunCommand", err.Error())
- }
-
- req, err := client.RunCommandPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RunCommand", nil, "Failure preparing request")
- return
- }
-
- result, err = client.RunCommandSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "RunCommand", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// RunCommandPreparer prepares the RunCommand request.
-func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters RunCommandInput) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/runCommand", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RunCommandSender sends the RunCommand request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request) (future VirtualMachineScaleSetVMsRunCommandFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// RunCommandResponder handles the response to the RunCommand request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// SimulateEviction the operation to simulate the eviction of spot virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) SimulateEviction(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result autorest.Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.SimulateEviction")
- defer func() {
- sc := -1
- if result.Response != nil {
- sc = result.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.SimulateEvictionPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "SimulateEviction", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.SimulateEvictionSender(req)
- if err != nil {
- result.Response = resp
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "SimulateEviction", resp, "Failure sending request")
- return
- }
-
- result, err = client.SimulateEvictionResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "SimulateEviction", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// SimulateEvictionPreparer prepares the SimulateEviction request.
-func (client VirtualMachineScaleSetVMsClient) SimulateEvictionPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/simulateEviction", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SimulateEvictionSender sends the SimulateEviction request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) SimulateEvictionSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// SimulateEvictionResponder handles the response to the SimulateEviction request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) SimulateEvictionResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Start starts a virtual machine in a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set.
-// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsStartFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Start")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request")
- return
- }
-
- result, err = client.StartSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// StartPreparer prepares the Start request.
-func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// StartSender sends the Start request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetVMsStartFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// StartResponder handles the response to the Start request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
-// Update updates a virtual machine of a VM scale set.
-// Parameters:
-// resourceGroupName - the name of the resource group.
-// VMScaleSetName - the name of the VM scale set where the extension should be create or updated.
-// instanceID - the instance ID of the virtual machine.
-// parameters - parameters supplied to the Update Virtual Machine Scale Sets VM operation.
-func (client VirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM) (result VirtualMachineScaleSetVMsUpdateFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Update")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
- }},
- }},
- }},
- }},
- }}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineScaleSetVMsClient", "Update", err.Error())
- }
-
- req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", nil, "Failure preparing request")
- return
- }
-
- result, err = client.UpdateSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "instanceId": autorest.Encode("path", instanceID),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- "vmScaleSetName": autorest.Encode("path", VMScaleSetName),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- parameters.InstanceID = nil
- parameters.Sku = nil
- parameters.Resources = nil
- parameters.Zones = nil
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMsUpdateFuture, err error) {
- var resp *http.Response
- future.FutureAPI = &azure.Future{}
- resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
- if err != nil {
- return
- }
- var azf azure.Future
- azf, err = azure.NewFutureFromResponse(resp)
- future.FutureAPI = &azf
- future.Result = future.result
- return
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineScaleSetVMsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go
deleted file mode 100644
index 394d659c218c..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/virtualmachinesizes.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package compute
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
- "context"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/Azure/go-autorest/autorest/validation"
- "github.com/Azure/go-autorest/tracing"
- "net/http"
-)
-
-// VirtualMachineSizesClient is the compute Client
-type VirtualMachineSizesClient struct {
- BaseClient
-}
-
-// NewVirtualMachineSizesClient creates an instance of the VirtualMachineSizesClient client.
-func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient {
- return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client using a custom
-// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
-// stack).
-func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient {
- return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List this API is deprecated. Use [Resources Skus](https://docs.microsoft.com/rest/api/compute/resourceskus/list)
-// Parameters:
-// location - the location upon which virtual-machine-sizes is queried.
-func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineSizesClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: location,
- Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
- return result, validation.NewError("compute.VirtualMachineSizesClient", "List", err.Error())
- }
-
- req, err := client.ListPreparer(ctx, location)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "location": autorest.Encode("path", location),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2021-07-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000000..ab51212676f9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
@@ -0,0 +1,68 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system.
+
+Here is a general template for an ANTLR based recognizer in Go:
+
+ .
+ ├── myproject
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.0-complete.jar
+ │ ├── error_listeners.go
+ │ ├── generate.go
+ │ ├── generate.sh
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the Go package they exist in.
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options.
+
+From the command line at the root of your package “myproject” you can then simply issue the command:
+
+ go generate ./...
+
+# Copyright Notice
+
+Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+*/
+package antlr
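
For illustration, a minimal main.go that drives a recognizer generated by the workflow described in the package documentation above might look like the sketch below. The grammar name MyGrammar, the myproject/parser import path, and the Prog start rule are hypothetical, and the module path is assumed to be the upstream one; only the antlr runtime calls are real API.

    package main

    import (
    	"fmt"

    	"github.com/antlr/antlr4/runtime/Go/antlr/v4"

    	"myproject/parser" // hypothetical package produced by generate.sh
    )

    func main() {
    	// Lexer -> token stream -> parser pipeline over an in-memory input.
    	input := antlr.NewInputStream("1 + 2 + 3")
    	lexer := parser.NewMyGrammarLexer(input) // assumed generated constructor
    	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    	p := parser.NewMyGrammarParser(tokens) // assumed generated constructor
    	tree := p.Prog() // assumed start rule of the grammar
    	fmt.Println(tree.ToStringTree(nil, p))
    }
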
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
similarity index 72%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
index a4e2079e656b..98010d2e6e68 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -6,11 +6,24 @@ package antlr
import "sync"
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term generally means an
+// “Augmented Recursive Transition Network”; there are also some descriptions of a “[Recursive Transition Network]”
+// in existence.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
// DecisionToState is the decision points for all rules, subrules, optional
- // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
+ // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
+ // can go back later and build DFA predictors for them. This includes
+ // all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -45,6 +58,8 @@ type ATN struct {
edgeMu sync.RWMutex
}
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool
func NewATN(grammarType int, maxTokenType int) *ATN {
return &ATN{
grammarType: grammarType,
@@ -53,7 +68,7 @@ func NewATN(grammarType int, maxTokenType int) *ATN {
}
}
-// NextTokensInContext computes the set of valid tokens that can occur starting
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
@@ -61,8 +76,8 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
return NewLL1Analyzer(a).Look(s, nil, ctx)
}
-// NextTokensNoContext computes the set of valid tokens that can occur starting
-// in s and staying in same rule. Token.EPSILON is in set if we reach end of
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
a.mu.Lock()
@@ -76,6 +91,8 @@ func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
return iset
}
+// NextTokens computes and returns the set of valid tokens starting in state s, by
+// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
if ctx == nil {
return a.NextTokensNoContext(s)
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
similarity index 84%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
index 97ba417f74f0..7619fa172edc 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -8,19 +8,14 @@ import (
"fmt"
)
-type comparable interface {
- equals(other interface{}) bool
-}
-
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
- comparable
-
- hash() int
+ Equals(o Collectable[ATNConfig]) bool
+ Hash() int
GetState() ATNState
GetAlt() int
@@ -47,7 +42,7 @@ type BaseATNConfig struct {
reachesIntoOuterContext int
}
-func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
+func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
return &BaseATNConfig{
state: old.state,
alt: old.alt,
@@ -135,11 +130,16 @@ func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
b.reachesIntoOuterContext = v
}
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
-func (b *BaseATNConfig) equals(o interface{}) bool {
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
if b == o {
return true
+ } else if o == nil {
+ return false
}
var other, ok = o.(*BaseATNConfig)
@@ -153,30 +153,32 @@ func (b *BaseATNConfig) equals(o interface{}) bool {
if b.context == nil {
equal = other.context == nil
} else {
- equal = b.context.equals(other.context)
+ equal = b.context.Equals(other.context)
}
var (
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
alts = b.alt == other.alt
- cons = b.semanticContext.equals(other.semanticContext)
+ cons = b.semanticContext.Equals(other.semanticContext)
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
)
return nums && alts && cons && sups && equal
}
-func (b *BaseATNConfig) hash() int {
+// Hash is the default hash function for BaseATNConfig, when no specialist hash function
+// is required for a collection
+func (b *BaseATNConfig) Hash() int {
var c int
if b.context != nil {
- c = b.context.hash()
+ c = b.context.Hash()
}
h := murmurInit(7)
h = murmurUpdate(h, b.state.GetStateNumber())
h = murmurUpdate(h, b.alt)
h = murmurUpdate(h, c)
- h = murmurUpdate(h, b.semanticContext.hash())
+ h = murmurUpdate(h, b.semanticContext.Hash())
return murmurFinish(h, 4)
}
@@ -243,7 +245,9 @@ func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *Lex
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
-func (l *LexerATNConfig) hash() int {
+// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Hash() int {
var f int
if l.passedThroughNonGreedyDecision {
f = 1
@@ -253,15 +257,20 @@ func (l *LexerATNConfig) hash() int {
h := murmurInit(7)
h = murmurUpdate(h, l.state.GetStateNumber())
h = murmurUpdate(h, l.alt)
- h = murmurUpdate(h, l.context.hash())
- h = murmurUpdate(h, l.semanticContext.hash())
+ h = murmurUpdate(h, l.context.Hash())
+ h = murmurUpdate(h, l.semanticContext.Hash())
h = murmurUpdate(h, f)
- h = murmurUpdate(h, l.lexerActionExecutor.hash())
+ h = murmurUpdate(h, l.lexerActionExecutor.Hash())
h = murmurFinish(h, 6)
return h
}
-func (l *LexerATNConfig) equals(other interface{}) bool {
+// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
+ if l == other {
+ return true
+ }
var othert, ok = other.(*LexerATNConfig)
if l == other {
@@ -275,7 +284,7 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
var b bool
if l.lexerActionExecutor != nil {
- b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
+ b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
@@ -284,10 +293,9 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
return false
}
- return l.BaseATNConfig.equals(othert.BaseATNConfig)
+ return l.BaseATNConfig.Equals(othert.BaseATNConfig)
}
-
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
var ds, ok = target.(DecisionState)
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
similarity index 81%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
index 49ad4a7197a8..43e9b33f3bd2 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
@@ -1,24 +1,25 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
-import "fmt"
+import (
+ "fmt"
+)
type ATNConfigSet interface {
- hash() int
+ Hash() int
+ Equals(o Collectable[ATNConfig]) bool
Add(ATNConfig, *DoubleDict) bool
AddAll([]ATNConfig) bool
- GetStates() Set
+ GetStates() *JStore[ATNState, Comparator[ATNState]]
GetPredicates() []SemanticContext
GetItems() []ATNConfig
OptimizeConfigs(interpreter *BaseATNSimulator)
- Equals(other interface{}) bool
-
Length() int
IsEmpty() bool
Contains(ATNConfig) bool
@@ -57,7 +58,7 @@ type BaseATNConfigSet struct {
// effectively doubles the number of objects associated with ATNConfigs. All
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
// read-only because a set becomes a DFA state.
- configLookup Set
+ configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
// configs is the added elements.
configs []ATNConfig
@@ -83,7 +84,7 @@ type BaseATNConfigSet struct {
// readOnly is whether it is read-only. Do not
// allow any code to manipulate the set if true because DFA states will point at
- // sets and those must not change. It not protect other fields; conflictingAlts
+	// sets and those must not change. It does not protect other fields; conflictingAlts
// in particular, which is assigned after readOnly.
readOnly bool
@@ -104,7 +105,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet {
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
return &BaseATNConfigSet{
cachedHash: -1,
- configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
+ configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
fullCtx: fullCtx,
}
}
@@ -126,9 +127,11 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
b.dipsIntoOuterContext = true
}
- existing := b.configLookup.Add(config).(ATNConfig)
+ existing, present := b.configLookup.Put(config)
- if existing == config {
+ // The config was not already in the set
+ //
+ if !present {
b.cachedHash = -1
b.configs = append(b.configs, config) // Track order here
return true
@@ -154,11 +157,14 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
return true
}
-func (b *BaseATNConfigSet) GetStates() Set {
- states := newArray2DHashSet(nil, nil)
+func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
for i := 0; i < len(b.configs); i++ {
- states.Add(b.configs[i].GetState())
+ states.Put(b.configs[i].GetState())
}
return states
@@ -214,7 +220,34 @@ func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
return false
}
-func (b *BaseATNConfigSet) Equals(other interface{}) bool {
+// Compare is a hack function just to verify that adding DFA states to the known
+// set works, so long as comparison of ATNConfigSets works. For that to work, we
+// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
+// know the order, so we do this inefficient hack. If this proves the point, then
+// we can change the config set to a better structure.
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+
+ for _, c := range b.configs {
+ found := false
+ for _, c2 := range bs.configs {
+ if c.Equals(c2) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+
+ }
+ return true
+}
+
+func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
if b == other {
return true
} else if _, ok := other.(*BaseATNConfigSet); !ok {
@@ -224,15 +257,15 @@ func (b *BaseATNConfigSet) Equals(other interface{}) bool {
other2 := other.(*BaseATNConfigSet)
return b.configs != nil &&
- // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
b.fullCtx == other2.fullCtx &&
b.uniqueAlt == other2.uniqueAlt &&
b.conflictingAlts == other2.conflictingAlts &&
b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
}
-func (b *BaseATNConfigSet) hash() int {
+func (b *BaseATNConfigSet) Hash() int {
if b.readOnly {
if b.cachedHash == -1 {
b.cachedHash = b.hashCodeConfigs()
@@ -247,7 +280,7 @@ func (b *BaseATNConfigSet) hash() int {
func (b *BaseATNConfigSet) hashCodeConfigs() int {
h := 1
for _, config := range b.configs {
- h = 31*h + config.hash()
+ h = 31*h + config.Hash()
}
return h
}
@@ -283,7 +316,7 @@ func (b *BaseATNConfigSet) Clear() {
b.configs = make([]ATNConfig, 0)
b.cachedHash = -1
- b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
}
func (b *BaseATNConfigSet) FullContext() bool {
@@ -365,7 +398,8 @@ type OrderedATNConfigSet struct {
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
b := NewBaseATNConfigSet(false)
- b.configLookup = newArray2DHashSet(nil, nil)
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
@@ -375,7 +409,7 @@ func hashATNConfig(i interface{}) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
- hash = 31*hash + o.GetSemanticContext().hash()
+ hash = 31*hash + o.GetSemanticContext().Hash()
return hash
}
@@ -403,5 +437,5 @@ func equalATNConfigs(a, b interface{}) bool {
return false
}
- return ai.GetSemanticContext().equals(bi.GetSemanticContext())
+ return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
}
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
similarity index 96%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
index cb8eafb0b2ad..3c975ec7bfda 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
index aea9bbfa936e..3888856b4b66 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
similarity index 94%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
index d5454d6d5ddd..41529115fa69 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
similarity index 97%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
index 3835bb2e9312..1f2a56bc3118 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -49,7 +49,8 @@ type ATNState interface {
AddTransition(Transition, int)
String() string
- hash() int
+ Hash() int
+ Equals(Collectable[ATNState]) bool
}
type BaseATNState struct {
@@ -123,7 +124,7 @@ func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
as.NextTokenWithinRule = v
}
-func (as *BaseATNState) hash() int {
+func (as *BaseATNState) Hash() int {
return as.stateNumber
}
@@ -131,7 +132,7 @@ func (as *BaseATNState) String() string {
return strconv.Itoa(as.stateNumber)
}
-func (as *BaseATNState) equals(other interface{}) bool {
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
if ot, ok := other.(ATNState); ok {
return as.stateNumber == ot.GetStateNumber()
}
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
similarity index 79%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
index a7b48976b311..3a515a145f46 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
similarity index 82%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
index 70c1207f7ffd..c33f0adb5e12 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
similarity index 96%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
index 330ff8f31f8a..1bb0314ea09c 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
index c90e9b8904c8..c6c9485a20ae 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -331,10 +331,12 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
- c.Fill()
if interval == nil {
+ c.Fill()
interval = NewInterval(0, len(c.tokens)-1)
+ } else {
+ c.Sync(interval.Stop)
}
start := interval.Start
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
new file mode 100644
index 000000000000..9ea3200536a5
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
@@ -0,0 +1,147 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// This file contains all the implementations of custom comparators used for generic collections when the
+// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
+// put the comparators in the source files for the structs themselves, but given that the organization of this code is
+// sorta kinda based upon the Java code, I found it confusing trying to work out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
+// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[ATNConfig]{}
+ aConfCompInst = &ATNConfigComparator[ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+ dfaStateEqInst = &ObjEqComparator[*DFAState]{}
+ semctxEqInst = &ObjEqComparator[SemanticContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+)
+
+// Equals2 delegates to the Equals() method of type T
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+ return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+
+ return o.Hash()
+}
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup
+func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
+type ATNAltConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs, used when mapping configs to alt BitSets
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetContext().Equals(o2.GetContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, used when mapping configs to alt BitSets
+func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, o.GetState().GetStateNumber())
+ h = murmurUpdate(h, o.GetContext().Hash())
+ return murmurFinish(h, 2)
+}
+
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type BaseATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ //
+ if o1 == o2 {
+ return true
+
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup, but in fact just
+// delegates to the standard Hash() method of the ATNConfig type.
+func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
+
+ return o.Hash()
+}
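
As a rough sketch of the pattern described in the comment at the top of this file, a caller can pair a Collectable type with ObjEqComparator and a JStore. The point type below is hypothetical; the antlr identifiers are the ones introduced in this change, and the import path is assumed to be the upstream module path.

    package main

    import (
    	"fmt"

    	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // point is a toy Collectable: Hash and Equals define identity for the store.
    type point struct{ x, y int }

    func (p *point) Hash() int { return p.x*31 + p.y }

    func (p *point) Equals(o antlr.Collectable[*point]) bool {
    	q, ok := o.(*point)
    	return ok && p.x == q.x && p.y == q.y
    }

    func main() {
    	// ObjEqComparator simply delegates to the Hash()/Equals() methods above.
    	s := antlr.NewJStore[*point, antlr.Comparator[*point]](&antlr.ObjEqComparator[*point]{})

    	s.Put(&point{1, 2})
    	_, present := s.Put(&point{1, 2}) // a distinct pointer, but equal by value

    	fmt.Println(present) // true: an equivalent element was already stored
    }
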
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
similarity index 80%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
index d55a2a87d5d1..bfd43e1f731d 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
@@ -1,13 +1,9 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
-import (
- "sort"
-)
-
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -15,8 +11,15 @@ type DFA struct {
decision int
// states is all the DFA states. Use Map to get the old state back; Set can only
- // indicate whether it is there.
- states map[int]*DFAState
+	// indicate whether it is there. Go maps handle key hash collisions and so on and are very
+	// good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
+	// and C#, where, if the hashcode is the same for two objects, Equals() is called against them
+	// to see if they really are the same object.
+ //
+ //
+ states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
+
+ numstates int
s0 *DFAState
@@ -29,7 +32,7 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: make(map[int]*DFAState),
+ states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
@@ -92,7 +95,8 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.setStates(make(map[int]*DFAState))
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+ d.numstates = 0
if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
@@ -117,38 +121,12 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-func (d *DFA) getState(hash int) (*DFAState, bool) {
- s, ok := d.states[hash]
- return s, ok
-}
-
-func (d *DFA) setStates(states map[int]*DFAState) {
- d.states = states
-}
-
-func (d *DFA) setState(hash int, state *DFAState) {
- d.states[hash] = state
-}
-
-func (d *DFA) numStates() int {
- return len(d.states)
-}
-
-type dfaStateList []*DFAState
-
-func (d dfaStateList) Len() int { return len(d) }
-func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
-func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
-
// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
- vs := make([]*DFAState, 0, len(d.states))
-
- for _, v := range d.states {
- vs = append(vs, v)
- }
- sort.Sort(dfaStateList(vs))
+ vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+ return i.stateNumber < j.stateNumber
+ })
return vs
}
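
The comment on the states field above is the motivation for using a JStore here. As an independent toy illustration (no antlr types involved), a plain map keyed only on a hash code loses states that collide, while a hash-plus-Equals bucket keeps them:

    package main

    import "fmt"

    // state stands in for a DFAState: two distinct states may share a hash code.
    type state struct{ id, hash int }

    func main() {
    	a := state{id: 1, hash: 42}
    	b := state{id: 2, hash: 42} // different state, same hash code

    	byHash := map[int]state{}
    	byHash[a.hash] = a
    	byHash[b.hash] = b       // silently overwrites a
    	fmt.Println(len(byHash)) // 1: one state has been lost

    	// A Hash+Equals store keeps a bucket per hash and compares within it.
    	buckets := map[int][]state{}
    	for _, s := range []state{a, b} {
    		dup := false
    		for _, t := range buckets[s.hash] {
    			if t.id == s.id { // stands in for Equals()
    				dup = true
    				break
    			}
    		}
    		if !dup {
    			buckets[s.hash] = append(buckets[s.hash], s)
    		}
    	}
    	fmt.Println(len(buckets[42])) // 2: both states are retained
    }
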
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
index bf2ccc06cd13..84d0a31e5362 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
similarity index 90%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
index 970ed19865a9..c90dec55c868 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -90,16 +90,16 @@ func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
}
// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
-func (d *DFAState) GetAltSet() Set {
- alts := newArray2DHashSet(nil, nil)
+func (d *DFAState) GetAltSet() []int {
+ var alts []int
if d.configs != nil {
for _, c := range d.configs.GetItems() {
- alts.Add(c.GetAlt())
+ alts = append(alts, c.GetAlt())
}
}
- if alts.Len() == 0 {
+ if len(alts) == 0 {
return nil
}
@@ -130,27 +130,6 @@ func (d *DFAState) setPrediction(v int) {
d.prediction = v
}
-// equals returns whether d equals other. Two DFAStates are equal if their ATN
-// configuration sets are the same. This method is used to see if a state
-// already exists.
-//
-// Because the number of alternatives and number of ATN configurations are
-// finite, there is a finite number of DFA states that can be processed. This is
-// necessary to show that the algorithm terminates.
-//
-// Cannot test the DFA state numbers here because in
-// ParserATNSimulator.addDFAState we need to know if any other state exists that
-// has d exact set of ATN configurations. The stateNumber is irrelevant.
-func (d *DFAState) equals(other interface{}) bool {
- if d == other {
- return true
- } else if _, ok := other.(*DFAState); !ok {
- return false
- }
-
- return d.configs.Equals(other.(*DFAState).configs)
-}
-
func (d *DFAState) String() string {
var s string
if d.isAcceptState {
@@ -164,8 +143,27 @@ func (d *DFAState) String() string {
return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}
-func (d *DFAState) hash() int {
+func (d *DFAState) Hash() int {
h := murmurInit(7)
- h = murmurUpdate(h, d.configs.hash())
+ h = murmurUpdate(h, d.configs.Hash())
return murmurFinish(h, 1)
}
+
+// Equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has d exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
+ if d == o {
+ return true
+ }
+
+ return d.configs.Equals(o.(*DFAState).configs)
+}
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
index 1fec43d9dca3..c55bcc19b2c8 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -87,7 +87,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
return strconv.Itoa(decision) + " (" + ruleName + ")"
}
-//
// Computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser.
@@ -97,7 +96,6 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
-//
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
index 028e1a9d7f03..f679f0dcd5e1 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -48,12 +48,9 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
-//
// Provides a default instance of {@link ConsoleErrorListener}.
-//
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-//
// {@inheritDoc}
//
//
@@ -64,7 +61,6 @@ var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
//
// line line:charPositionInLine msg
//
-//
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
index c4080dbfd185..5c0a637ba4aa 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -23,7 +23,6 @@ type ErrorStrategy interface {
// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
-//
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
@@ -61,12 +60,10 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {
d.endErrorCondition(recognizer)
}
-//
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
-//
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
@@ -75,28 +72,23 @@ func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
-//
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
-//
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
-//
// {@inheritDoc}
//
// The default implementation simply calls {@link //endErrorCondition}.
-//
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}
-//
// {@inheritDoc}
//
// The default implementation returns immediately if the handler is already
@@ -114,7 +106,6 @@ func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
//
All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception
//
-//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
@@ -142,7 +133,6 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
// The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.
-//
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
@@ -206,7 +196,6 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off d
// functionality by simply overriding d method as a blank { }.
-//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.InErrorRecoveryMode(recognizer) {
@@ -247,7 +236,6 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
@@ -264,7 +252,6 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-//
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
@@ -272,14 +259,12 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-//
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
@@ -287,7 +272,6 @@ func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *Inpu
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
@@ -310,7 +294,6 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile
// {@link Parser//NotifyErrorListeners}.
//
// @param recognizer the parser instance
-//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -339,7 +322,6 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
// {@link Parser//NotifyErrorListeners}.
//
// @param recognizer the parser instance
-//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -392,15 +374,14 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
// derivation:
//
//
-// => ID '=' '(' INT ')' ('+' atom)* ''
+// => ID '=' '(' INT ')' ('+' atom)* ”
// ^
//
//
-// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
+// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -418,7 +399,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
panic(NewInputMisMatchException(recognizer))
}
-//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
@@ -434,7 +414,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
-//
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
@@ -469,7 +448,6 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
-//
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
@@ -507,7 +485,6 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override d method to create the appropriate tokens.
-//
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
@@ -546,7 +523,6 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a NewJava type.
-//
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -578,7 +554,7 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
-// EXAMPLE
+// # EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
@@ -597,7 +573,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// c : ID
// | INT
//
-//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
@@ -660,7 +635,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
-//
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
@@ -733,7 +707,6 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
-//
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
@@ -749,7 +722,6 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
-//
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
index 2ef74926ecb1..3954c1378299 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -74,7 +74,6 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {
// If the state number is not known, b method returns -1.
-//
// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time b exception was panicn.
//
@@ -136,7 +135,6 @@ type NoViableAltException struct {
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error. Reported by ReportNoViableAlternative()
-//
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
@@ -177,7 +175,6 @@ type InputMisMatchException struct {
// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
-//
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
i := new(InputMisMatchException)
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
similarity index 92%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
index 842170c086c9..bd6ad5efe3de 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
similarity index 96%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
index 5ff270f53689..a8b889cedb95 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
similarity index 82%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
index 438e0ea6e754..4778878bd0db 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
index 1e9393adb60e..c1e155e8180e 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -223,6 +223,10 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
+func (i *IntervalSet) GetIntervals() []*Interval {
+ return i.intervals
+}
+
func (i *IntervalSet) toCharString() string {
names := make([]string, len(i.intervals))
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
new file mode 100644
index 000000000000..e5a74f0c6c49
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
@@ -0,0 +1,198 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(value)
+
+ for _, v1 := range s.store[kh] {
+ if s.comparator.Equals2(value, v1) {
+ return v1, true
+ }
+ }
+ s.store[kh] = append(s.store[kh], value)
+ s.len++
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(key)
+
+ for _, v := range s.store[kh] {
+ if s.comparator.Equals2(key, v) {
+ return v, true
+ }
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
+
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ for _, v := range e {
+ vs = append(vs, v)
+ }
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
+ return &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) {
+ kh := m.comparator.Hash1(key)
+
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ m.len++
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+
+ var none V
+ kh := m.comparator.Hash1(key)
+ for _, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ return e.val, true
+ }
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return len(m.store)
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
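Note: the new jcollect.go above adds JStore and JMap, small generic collections that bucket values by a caller-supplied Comparator and resolve hash collisions with a slice scan. Below is a minimal sketch of how a comparator plugs in, written as if it lived inside package antlr; intComparator and ExampleJStore are illustrative names only, not part of the vendored runtime.

package antlr

import "fmt"

// intComparator is a hypothetical Comparator[int]: Hash1 chooses the bucket and
// Equals2 resolves collisions inside that bucket, exactly as JStore.Put expects.
type intComparator struct{}

func (intComparator) Hash1(o int) int       { return o % 8 } // deliberately coarse to force collisions
func (intComparator) Equals2(a, b int) bool { return a == b }

func ExampleJStore() {
	s := NewJStore[int, Comparator[int]](intComparator{})

	v, exists := s.Put(42) // first insert: stored, exists == false
	fmt.Println(v, exists)

	v, exists = s.Put(42) // duplicate: the stored value is returned, exists == true
	fmt.Println(v, exists)

	fmt.Println(s.Contains(42), s.Len())
	// Output:
	// 42 false
	// 42 true
	// true 1
}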
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
index b04f04572f1d..6533f0516453 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -232,8 +232,6 @@ func (b *BaseLexer) NextToken() Token {
}
return b.token
}
-
- return nil
}
// Instruct the lexer to Skip creating a token for current lexer rule
@@ -342,7 +340,7 @@ func (b *BaseLexer) GetCharIndex() int {
}
// Return the text Matched so far for the current token or any text override.
-//Set the complete text of l token it wipes any previous changes to the text.
+// Set the complete text of l token it wipes any previous changes to the text.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
similarity index 91%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
index 5a325be13720..111656c29529 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -21,8 +21,8 @@ type LexerAction interface {
getActionType() int
getIsPositionDependent() bool
execute(lexer Lexer)
- hash() int
- equals(other LexerAction) bool
+ Hash() int
+ Equals(other LexerAction) bool
}
type BaseLexerAction struct {
@@ -51,15 +51,14 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
return b.isPositionDependent
}
-func (b *BaseLexerAction) hash() int {
+func (b *BaseLexerAction) Hash() int {
return b.actionType
}
-func (b *BaseLexerAction) equals(other LexerAction) bool {
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
return b == other
}
-//
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// The {@code Skip} command does not have any parameters, so l action is
@@ -85,7 +84,8 @@ func (l *LexerSkipAction) String() string {
return "skip"
}
-// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+//
// with the assigned type.
type LexerTypeAction struct {
*BaseLexerAction
@@ -104,14 +104,14 @@ func (l *LexerTypeAction) execute(lexer Lexer) {
lexer.SetType(l.thetype)
}
-func (l *LexerTypeAction) hash() int {
+func (l *LexerTypeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.thetype)
return murmurFinish(h, 2)
}
-func (l *LexerTypeAction) equals(other LexerAction) bool {
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
@@ -148,14 +148,14 @@ func (l *LexerPushModeAction) execute(lexer Lexer) {
lexer.PushMode(l.mode)
}
-func (l *LexerPushModeAction) hash() int {
+func (l *LexerPushModeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
-func (l *LexerPushModeAction) equals(other LexerAction) bool {
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
@@ -245,14 +245,14 @@ func (l *LexerModeAction) execute(lexer Lexer) {
lexer.SetMode(l.mode)
}
-func (l *LexerModeAction) hash() int {
+func (l *LexerModeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
-func (l *LexerModeAction) equals(other LexerAction) bool {
+func (l *LexerModeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
@@ -303,7 +303,7 @@ func (l *LexerCustomAction) execute(lexer Lexer) {
lexer.Action(nil, l.ruleIndex, l.actionIndex)
}
-func (l *LexerCustomAction) hash() int {
+func (l *LexerCustomAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.ruleIndex)
@@ -311,13 +311,14 @@ func (l *LexerCustomAction) hash() int {
return murmurFinish(h, 3)
}
-func (l *LexerCustomAction) equals(other LexerAction) bool {
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
- return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
}
}
@@ -344,14 +345,14 @@ func (l *LexerChannelAction) execute(lexer Lexer) {
lexer.SetChannel(l.channel)
}
-func (l *LexerChannelAction) hash() int {
+func (l *LexerChannelAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.channel)
return murmurFinish(h, 2)
}
-func (l *LexerChannelAction) equals(other LexerAction) bool {
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
@@ -412,10 +413,10 @@ func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
l.lexerAction.execute(lexer)
}
-func (l *LexerIndexedCustomAction) hash() int {
+func (l *LexerIndexedCustomAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.offset)
- h = murmurUpdate(h, l.lexerAction.hash())
+ h = murmurUpdate(h, l.lexerAction.Hash())
return murmurFinish(h, 2)
}
@@ -425,6 +426,7 @@ func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
- return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
}
}
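Note: the lexer actions above export Hash() and Equals() (previously hash()/equals()) so code outside the concrete types, in particular the generic collections from jcollect.go, can delegate to them. A hypothetical comparator sketch follows; lexerActionComparator is an illustrative name, while the runtime's own comparator instances (such as aConfEqInst used later in this patch) follow the same shape.

package antlr

// lexerActionComparator adapts the newly exported methods to the Comparator
// interface so LexerAction values can key a JStore.
type lexerActionComparator struct{}

func (lexerActionComparator) Hash1(a LexerAction) int       { return a.Hash() }
func (lexerActionComparator) Equals2(a, b LexerAction) bool { return a.Equals(b) }

// Usage sketch: deduplicate semantically equal actions rather than identical pointers.
//
//	actions := NewJStore[LexerAction, Comparator[LexerAction]](lexerActionComparator{})
//	actions.Put(NewLexerTypeAction(1))
//	_, seen := actions.Put(NewLexerTypeAction(1)) // seen == true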
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
similarity index 88%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
index 056941dd6e7f..be1ba7a7e308 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
@@ -1,9 +1,11 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
+import "golang.org/x/exp/slices"
+
// Represents an executor for a sequence of lexer actions which traversed during
// the Matching operation of a lexer rule (token).
//
@@ -12,8 +14,8 @@ package antlr
// not cause bloating of the {@link DFA} created for the lexer.
type LexerActionExecutor struct {
- lexerActions []LexerAction
- cachedHash int
+ lexerActions []LexerAction
+ cachedHash int
}
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
@@ -30,7 +32,7 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
l.cachedHash = murmurInit(57)
for _, a := range lexerActions {
- l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
}
return l
@@ -151,14 +153,17 @@ func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex
}
}
-func (l *LexerActionExecutor) hash() int {
+func (l *LexerActionExecutor) Hash() int {
if l == nil {
+ // TODO: Why is this here? l should not be nil
return 61
}
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
return l.cachedHash
}
-func (l *LexerActionExecutor) equals(other interface{}) bool {
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
if l == other {
return true
}
@@ -169,5 +174,13 @@ func (l *LexerActionExecutor) equals(other interface{}) bool {
if othert == nil {
return false
}
- return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
}
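Note: the Equals change above is behavioural, not cosmetic. The old code compared the addresses of the two lexerActions slice headers, which only holds when both executors are literally the same object; the new code compares lengths and elements via slices.EqualFunc. A small sketch of the idiom; equalActionSlices is an illustrative helper, not a runtime function.

package antlr

import "golang.org/x/exp/slices"

// equalActionSlices mirrors the new executor comparison: same length and every
// pair of elements reporting Equals. EqualFunc performs the length check itself.
func equalActionSlices(a, b []LexerAction) bool {
	return slices.EqualFunc(a, b, func(x, y LexerAction) bool {
		return x.Equals(y)
	})
}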
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
index dc05153ea445..c573b7521004 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -591,19 +591,24 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
- hash := proposed.hash()
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, ok := dfa.getState(hash)
- if ok {
+ existing, present := dfa.states.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
proposed = existing
} else {
- proposed.stateNumber = dfa.numStates()
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.states.Len()
configs.SetReadOnly(true)
proposed.configs = configs
- dfa.setState(hash, proposed)
+ dfa.states.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
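Note: addDFAState above now deduplicates states through dfa.states, the JStore introduced in this patch, instead of a manually computed hash key. The lookup-then-insert pattern is extracted below as a standalone helper for illustration; getOrAddState is not a runtime function, and it assumes dfa.states is the JStore of *DFAState added elsewhere in the patch.

package antlr

// getOrAddState captures the pattern used by both simulators: an equality-based
// Get replaces the old hash-keyed lookup, and a genuinely new state is numbered
// from the store's current length before being Put.
func getOrAddState(dfa *DFA, proposed *DFAState) *DFAState {
	if existing, present := dfa.states.Get(proposed); present {
		return existing
	}
	proposed.stateNumber = dfa.states.Len()
	dfa.states.Put(proposed)
	return proposed
}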
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
similarity index 87%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
index 6ffb37de6944..76689615a6d0 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -14,14 +14,15 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
return la
}
-//* Special value added to the lookahead sets to indicate that we hit
-// a predicate during analysis if {@code seeThruPreds==false}.
-///
+// - Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+//
+// /
const (
LL1AnalyzerHitPred = TokenInvalidType
)
-//*
+// *
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
@@ -38,7 +39,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
- lookBusy := newArray2DHashSet(nil, nil)
+ lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing
@@ -50,7 +51,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
return look
}
-//*
+// *
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
@@ -67,7 +68,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
-///
+// /
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
seeThruPreds := true // ignore preds get all lookahead
@@ -75,7 +76,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
- la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
+ la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
return r
}
@@ -109,14 +110,14 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
c := NewBaseATNConfig6(s, 0, ctx)
@@ -124,8 +125,11 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
return
}
- lookBusy.Add(c)
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+ }
if s == stopState {
if ctx == nil {
look.addOne(TokenEpsilon)
@@ -198,7 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
}
}
-func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
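Note: the look* helpers above replace the untyped newArray2DHashSet with a typed JStore and use Put's second return value to detect configurations that were already visited. The early-return idiom in one place; alreadyVisited is an illustrative helper only.

package antlr

// alreadyVisited inserts c into lookBusy and reports whether an equal ATNConfig
// had been recorded before; look1 returns immediately in that case to break cycles.
func alreadyVisited(lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], c ATNConfig) bool {
	_, present := lookBusy.Put(c)
	return present
}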
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
index 2ab2f5605219..d26bf063920a 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -91,7 +91,6 @@ func NewBaseParser(input TokenStream) *BaseParser {
// bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
-//
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
@@ -230,7 +229,6 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
// @param listener the listener to add
//
// @panics nilPointerException if {@code} listener is {@code nil}
-//
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
@@ -241,13 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
p.parseListeners = append(p.parseListeners, listener)
}
-//
// Remove {@code listener} from the list of parse listeners.
//
// If {@code listener} is {@code nil} or has not been added as a parse
// listener, p.method does nothing.
// @param listener the listener to remove
-//
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
@@ -289,11 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
}
}
-//
// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
-//
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
@@ -330,7 +324,6 @@ func (p *BaseParser) setTokenFactory(factory TokenFactory) {
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
-//
func (p *BaseParser) GetATNWithBypassAlts() {
// TODO
@@ -402,7 +395,6 @@ func (p *BaseParser) SetTokenStream(input TokenStream) {
// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
-//
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
}
@@ -624,7 +616,6 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
-//
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
@@ -686,7 +677,7 @@ func (p *BaseParser) GetDFAStrings() string {
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.numStates() > 0 {
+ if dfa.states.Len() > 0 {
if seenOne {
fmt.Println()
}
@@ -703,7 +694,6 @@ func (p *BaseParser) GetSourceName() string {
// During a parse is sometimes useful to listen in on the rule entry and exit
// events as well as token Matches. p.is for quick and dirty debugging.
-//
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
similarity index 94%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
index 888d512975a5..8bcc46a0d992 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -11,11 +11,11 @@ import (
)
var (
- ParserATNSimulatorDebug = false
- ParserATNSimulatorListATNDecisions = false
- ParserATNSimulatorDFADebug = false
- ParserATNSimulatorRetryDebug = false
- TurnOffLRLoopEntryBranchOpt = false
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorTraceATNSim = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
)
type ParserATNSimulator struct {
@@ -70,8 +70,8 @@ func (p *ParserATNSimulator) reset() {
}
func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
strconv.Itoa(input.LT(1).GetColumn()))
@@ -111,15 +111,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
if s0 == nil {
if outerContext == nil {
- outerContext = RuleContextEmpty
+ outerContext = ParserRuleContextEmpty
}
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug {
fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
}
fullCtx := false
- s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
p.atn.stateMu.Lock()
if dfa.getPrecedenceDfa() {
@@ -174,17 +174,18 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
// Reporting insufficient predicates
// cover these cases:
-// dead end
-// single alt
-// single alt + preds
-// conflict
-// conflict + preds
//
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
}
@@ -277,8 +278,6 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
t = input.LA(1)
}
}
-
- panic("Should not have reached p state")
}
// Get an existing target state for an edge in the DFA. If the target state
@@ -384,7 +383,7 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState
// comes back with reach.uniqueAlt set to a valid alt
func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATNWithFullContext " + s0.String())
}
@@ -492,9 +491,6 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT
}
func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
- if ParserATNSimulatorDebug {
- fmt.Println("in computeReachSet, starting closure: " + closure.String())
- }
if p.mergeCache == nil {
p.mergeCache = NewDoubleDict()
}
@@ -570,7 +566,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
//
if reach == nil {
reach = NewBaseATNConfigSet(fullCtx)
- closureBusy := newArray2DHashSet(nil, nil)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
treatEOFAsEpsilon := t == TokenEOF
amount := len(intermediate.configs)
for k := 0; k < amount; k++ {
@@ -610,6 +606,11 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
reach.Add(skippedStopStates[l], p.mergeCache)
}
}
+
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
if len(reach.GetItems()) == 0 {
return nil
}
@@ -617,7 +618,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
return reach
}
-//
// Return a configuration set containing only the configurations from
// {@code configs} which are in a {@link RuleStopState}. If all
// configurations in {@code configs} are already in a rule stop state, p
@@ -636,7 +636,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
// @return {@code configs} if all configurations in {@code configs} are in a
// rule stop state, otherwise return a Newconfiguration set containing only
// the configurations from {@code configs} which are in a rule stop state
-//
func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
if PredictionModeallConfigsInRuleStopStates(configs) {
return configs
@@ -662,16 +661,20 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
// always at least the implicit call to start rule
initialContext := predictionContextFromRuleContext(p.atn, ctx)
configs := NewBaseATNConfigSet(fullCtx)
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
for i := 0; i < len(a.GetTransitions()); i++ {
target := a.GetTransitions()[i].getTarget()
c := NewBaseATNConfig6(target, i+1, initialContext)
- closureBusy := newArray2DHashSet(nil, nil)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
p.closure(c, configs, closureBusy, true, fullCtx, false)
}
return configs
}
-//
// This method transforms the start state computed by
// {@link //computeStartState} to the special start state used by a
// precedence DFA for a particular precedence value. The transformation
@@ -726,7 +729,6 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
// @return The transformed configuration set representing the start state
// for a precedence DFA at a particular precedence level (determined by
// calling {@link Parser//getPrecedence}).
-//
func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
statesFromAlt1 := make(map[int]PredictionContext)
@@ -760,7 +762,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf
// (basically a graph subtraction algorithm).
if !config.getPrecedenceFilterSuppressed() {
context := statesFromAlt1[config.GetState().GetStateNumber()]
- if context != nil && context.equals(config.GetContext()) {
+ if context != nil && context.Equals(config.GetContext()) {
// eliminated
continue
}
@@ -824,7 +826,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
return pairs
}
-//
// This method is used to improve the localization of error messages by
// choosing an alternative rather than panicing a
// {@link NoViableAltException} in particular prediction scenarios where the
@@ -869,7 +870,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
// @return The value to return from {@link //AdaptivePredict}, or
// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
// identified and {@link //AdaptivePredict} should Report an error instead.
-//
func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
semValidConfigs := cfgs[0]
@@ -938,11 +938,11 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS
}
// Look through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A {@code NONE} predicate indicates an alt containing an
-// unpredicated config which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
//
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
predictions := NewBitSet()
for i := 0; i < len(predPredictions); i++ {
@@ -972,16 +972,16 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
return predictions
}
-func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
initialDepth := 0
p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEOFAsEpsilon)
}
-func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if ParserATNSimulatorDebug {
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("closure(" + config.String() + ")")
- fmt.Println("configs(" + configs.String() + ")")
+ //fmt.Println("configs(" + configs.String() + ")")
if config.GetReachesIntoOuterContext() > 50 {
panic("problem")
}
@@ -1031,7 +1031,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs
}
// Do the actual work of walking epsilon edges//
-func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
state := config.GetState()
// optimization
if !state.GetEpsilonOnlyTransitions() {
@@ -1066,7 +1066,8 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
- if closureBusy.Add(c) != c {
+ _, present := closureBusy.Put(c)
+ if present {
// avoid infinite recursion for right-recursive rules
continue
}
@@ -1077,9 +1078,13 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
fmt.Println("dips into outer ctx: " + c.String())
}
} else {
- if !t.getIsEpsilon() && closureBusy.Add(c) != c {
- // avoid infinite recursion for EOF* and EOF+
- continue
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
}
if _, ok := t.(*RuleTransition); ok {
// latch when newDepth goes negative - once we step out of the entry context we can't return
@@ -1104,7 +1109,16 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
// left-recursion elimination. For efficiency, also check if
// the context has an empty stack case. If so, it would mean
// global FOLLOW so we can't perform optimization
- if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
return false
}
@@ -1117,8 +1131,8 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
return false
}
}
-
- decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
blockEndStateNum := decisionStartState.getEndState().stateNumber
blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
@@ -1355,13 +1369,12 @@ func (p *ParserATNSimulator) GetTokenName(t int) string {
return "EOF"
}
- if p.parser != nil && p.parser.GetLiteralNames() != nil {
- if t >= len(p.parser.GetLiteralNames()) {
- fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
- // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
- } else {
- return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
}
return strconv.Itoa(t)
@@ -1372,9 +1385,9 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
}
// Used for debugging in AdaptivePredict around execATN but I cut
-// it out for clarity now that alg. works well. We can leave p
-// "dead" code for a bit.
//
+// it out for clarity now that alg. works well. We can leave p
+// "dead" code for a bit.
func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
panic("Not implemented")
@@ -1421,7 +1434,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
return alt
}
-//
// Add an edge to the DFA, if possible. This method calls
// {@link //addDFAState} to ensure the {@code to} state is present in the
// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
@@ -1440,7 +1452,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
// @return If {@code to} is {@code nil}, p method returns {@code nil}
// otherwise p method returns the result of calling {@link //addDFAState}
// on {@code to}
-//
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
if ParserATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
@@ -1472,7 +1483,6 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
return to
}
-//
// Add state {@code D} to the DFA if it is not already present, and return
// the actual instance stored in the DFA. If a state equivalent to {@code D}
// is already in the DFA, the existing state is returned. Otherwise p
@@ -1486,25 +1496,30 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
// @return The state stored in the DFA. This will be either the existing
// state if {@code D} is already in the DFA, or {@code D} itself if the
// state was not already present.
-//
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
if d == ATNSimulatorError {
return d
}
- hash := d.hash()
- existing, ok := dfa.getState(hash)
- if ok {
+ existing, present := dfa.states.Get(d)
+ if present {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
return existing
}
- d.stateNumber = dfa.numStates()
+
+ // The state was not present, so update it with configs
+ //
+ d.stateNumber = dfa.states.Len()
if !d.configs.ReadOnly() {
d.configs.OptimizeConfigs(p.BaseATNSimulator)
d.configs.SetReadOnly(true)
}
- dfa.setState(hash, d)
- if ParserATNSimulatorDebug {
- fmt.Println("adding NewDFA state: " + d.String())
+ dfa.states.Put(d)
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
}
+
return d
}
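Note: among the simulator changes above, canDropLoopEntryEdgeInLeftRecursiveRule swaps a single value-type assertion for a state-kind check followed by a pointer assertion, since ATN states sit behind the ATNState interface as pointers. The shape of that guard, as an illustrative helper; isPrecedenceLoopEntry is not a runtime function.

package antlr

// isPrecedenceLoopEntry sketches the new guard: check the ATN state kind first,
// then assert the concrete pointer type, then read the precedence flag.
func isPrecedenceLoopEntry(s ATNState) bool {
	if s.GetStateType() != ATNStateStarLoopEntry {
		return false
	}
	loop, ok := s.(*StarLoopEntryState)
	return ok && loop.precedenceRuleDecision
}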
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
index 49cd10c5ffca..1c8cee74795e 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -340,7 +340,7 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
-var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
ParserRuleContext
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
similarity index 81%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
index 9fdfd52b26ce..ba62af361086 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
@@ -1,10 +1,12 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
+ "fmt"
+ "golang.org/x/exp/slices"
"strconv"
)
@@ -26,10 +28,10 @@ var (
)
type PredictionContext interface {
- hash() int
+ Hash() int
+ Equals(interface{}) bool
GetParent(int) PredictionContext
getReturnState(int) int
- equals(PredictionContext) bool
length() int
isEmpty() bool
hasEmptyPath() bool
@@ -53,7 +55,7 @@ func (b *BasePredictionContext) isEmpty() bool {
func calculateHash(parent PredictionContext, returnState int) int {
h := murmurInit(1)
- h = murmurUpdate(h, parent.hash())
+ h = murmurUpdate(h, parent.Hash())
h = murmurUpdate(h, returnState)
return murmurFinish(h, 2)
}
@@ -86,7 +88,6 @@ func NewPredictionContextCache() *PredictionContextCache {
// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a Newcontext to the cache.
// Protect shared cache from unsafe thread access.
-//
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
if ctx == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY
@@ -160,28 +161,28 @@ func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
return b.returnState == BasePredictionContextEmptyReturnState
}
-func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
+func (b *BaseSingletonPredictionContext) Hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
if b == other {
return true
- } else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ }
+ if _, ok := other.(*BaseSingletonPredictionContext); !ok {
return false
- } else if b.hash() != other.hash() {
- return false // can't be same if hash is different
}
otherP := other.(*BaseSingletonPredictionContext)
- if b.returnState != other.getReturnState(0) {
+ if b.returnState != otherP.getReturnState(0) {
return false
- } else if b.parentCtx == nil {
+ }
+ if b.parentCtx == nil {
return otherP.parentCtx == nil
}
- return b.parentCtx.equals(otherP.parentCtx)
-}
-
-func (b *BaseSingletonPredictionContext) hash() int {
- return b.cachedHash
+ return b.parentCtx.Equals(otherP.parentCtx)
}
func (b *BaseSingletonPredictionContext) String() string {
@@ -215,7 +216,7 @@ func NewEmptyPredictionContext() *EmptyPredictionContext {
p := new(EmptyPredictionContext)
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
-
+ p.cachedHash = calculateEmptyHash()
return p
}
@@ -231,7 +232,11 @@ func (e *EmptyPredictionContext) getReturnState(index int) int {
return e.returnState
}
-func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
+func (e *EmptyPredictionContext) Hash() int {
+ return e.cachedHash
+}
+
+func (e *EmptyPredictionContext) Equals(other interface{}) bool {
return e == other
}
@@ -254,7 +259,7 @@ func NewArrayPredictionContext(parents []PredictionContext, returnStates []int)
hash := murmurInit(1)
for _, parent := range parents {
- hash = murmurUpdate(hash, parent.hash())
+ hash = murmurUpdate(hash, parent.Hash())
}
for _, returnState := range returnStates {
@@ -298,18 +303,31 @@ func (a *ArrayPredictionContext) getReturnState(index int) int {
return a.returnStates[index]
}
-func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
- if _, ok := other.(*ArrayPredictionContext); !ok {
+// Equals is the default comparison function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Equals(o interface{}) bool {
+ if a == o {
+ return true
+ }
+ other, ok := o.(*ArrayPredictionContext)
+ if !ok {
return false
- } else if a.cachedHash != other.hash() {
+ }
+ if a.cachedHash != other.Hash() {
return false // can't be same if hash is different
- } else {
- otherP := other.(*ArrayPredictionContext)
- return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
}
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(a.returnStates, other.returnStates) &&
+ slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
+ return x.Equals(y)
+ })
}
-func (a *ArrayPredictionContext) hash() int {
+// Hash is the default hash function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Hash() int {
return a.BasePredictionContext.cachedHash
}
@@ -343,11 +361,11 @@ func (a *ArrayPredictionContext) String() string {
// /
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
if outerContext == nil {
- outerContext = RuleContextEmpty
+ outerContext = ParserRuleContextEmpty
}
// if we are in RuleContext of start rule, s, then BasePredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
return BasePredictionContextEMPTY
}
// If we have a parent, convert it to a BasePredictionContext graph
@@ -359,11 +377,20 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti
}
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- // share same graph if both same
- if a == b {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
return a
}
+ // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
+ // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
+ // from it.
+ // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
+ // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
+ // either of them.
+
ac, ok1 := a.(*BaseSingletonPredictionContext)
bc, ok2 := b.(*BaseSingletonPredictionContext)
@@ -380,17 +407,32 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
return b
}
}
- // convert singleton so both are arrays to normalize
- if _, ok := a.(*BaseSingletonPredictionContext); ok {
- a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+
+ // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
+ // here.
+ //
+ // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
+
+ var arp, arb *ArrayPredictionContext
+ var ok bool
+ if arp, ok = a.(*ArrayPredictionContext); ok {
+ } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ } else if _, ok = a.(*EmptyPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
}
- if _, ok := b.(*BaseSingletonPredictionContext); ok {
- b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+
+ if arb, ok = b.(*ArrayPredictionContext); ok {
+ } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ } else if _, ok = b.(*EmptyPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
}
- return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
+
+ // Both arp and arb
+ return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
}
-//
// Merge two {@link SingletonBasePredictionContext} instances.
//
// Stack tops equal, parents merge is same return left graph.
@@ -423,11 +465,11 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
return previous.(PredictionContext)
}
@@ -436,7 +478,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), rootMerge)
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
}
return rootMerge
}
@@ -456,7 +498,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
// Newjoined parent so create Newsingleton pointing to it, a'
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), spc)
+ mergeCache.set(a.Hash(), b.Hash(), spc)
}
return spc
}
@@ -478,7 +520,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
parents := []PredictionContext{singleParent, singleParent}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
@@ -494,12 +536,11 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
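A hedged illustration of the "stack tops equal, parents merge is same, return left graph" case documented above; it would live inside the antlr package (mergeSingletons is unexported), and the function name and values are made up:

func exampleMergeSingletons() {
	a := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 7)
	b := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 7)
	m := mergeSingletons(a, b, true, nil)
	// m == a here: both tops are 7 and the parents merge back to EMPTY,
	// so the left graph is reused instead of allocating a new context.
	_ = m
}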
-//
// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
@@ -561,7 +602,6 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
return nil
}
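A short sketch, inside the antlr package with illustrative values, of the {@code $}/wildcard root rules this section describes:

func exampleMergeRoot() {
	x := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 9)
	// Local-context (SLL) merge: the wildcard root absorbs the other operand.
	r1 := mergeRoot(BasePredictionContextEMPTY, x, true) // == BasePredictionContextEMPTY
	// Full-context merge: $ + x keeps both, yielding an array context of [x, $].
	r2 := mergeRoot(BasePredictionContextEMPTY, x, false) // an *ArrayPredictionContext
	_, _ = r1, r2
}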
-//
// Merge two {@link ArrayBasePredictionContext} instances.
//
// Different tops, different parents.
@@ -583,12 +623,18 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
}
@@ -608,7 +654,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
payload := a.returnStates[i]
// $+$ = $
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
// ->
// ax
if bothDollars || axAX {
@@ -651,7 +697,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
if k == 1 { // for just one merged element, return singleton top
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), pc)
+ mergeCache.set(a.Hash(), b.Hash(), pc)
}
return pc
}
@@ -663,27 +709,36 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
if M == a {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), a)
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
}
return a
}
if M == b {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), b)
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
}
return b
}
combineCommonParents(mergedParents)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), M)
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
}
return M
}
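A hedged, package-internal illustration of the array merge above; the return-state values are made up and all parents share the EMPTY context so the equal-payload branch is exercised:

func exampleMergeArrays() {
	p := BasePredictionContextEMPTY
	a := NewArrayPredictionContext([]PredictionContext{p, p}, []int{1, 3})
	b := NewArrayPredictionContext([]PredictionContext{p, p}, []int{2, 3})
	m := mergeArrays(a, b, true, nil)
	// the sorted return-state lists [1 3] and [2 3] interleave into [1 2 3];
	// the shared (parent, 3) pair is emitted once, so the merged context has 3 entries
	_ = m
}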
-//
// Make pass over all M {@code parents} merge any {@code equals()}
// ones.
// /
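A simplified sketch of the pass described above, not the vendored implementation (which uses the runtime's own collections); the helper name is hypothetical and collisions are resolved with an Equals check:

// dedupeParents collapses structurally equal parents onto one shared instance,
// so later pointer comparisons succeed and duplicate graphs are not retained.
func dedupeParents(parents []PredictionContext) {
	seen := make(map[int]PredictionContext)
	for i, p := range parents {
		if p == nil {
			continue
		}
		if prev, ok := seen[p.Hash()]; ok && prev.Equals(p) {
			parents[i] = prev // reuse the first equal parent
		} else {
			seen[p.Hash()] = p
		}
	}
}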
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
similarity index 95%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
index 15718f912bc0..7b9b72fab1e5 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -70,7 +70,6 @@ const (
PredictionModeLLExactAmbigDetection = 2
)
-//
// Computes the SLL prediction termination condition.
//
//
@@ -108,9 +107,9 @@ const (
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):
//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
//
-// When the ATN simulation reaches the state before {@code ''}, it has a
+// When the ATN simulation reaches the state before {@code ”}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative to has another way to continue,
@@ -152,16 +151,15 @@ const (
//
// Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// For example, we test {@code (x+x')==x”} when looking for conflicts in
// the following configurations.
//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
//
// If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.
-//
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
@@ -229,7 +227,6 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
return true
}
-//
// Full LL prediction termination.
//
// Can we stop looking ahead during ATN simulation or is there some
@@ -334,7 +331,7 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
//
//
// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} => stop and predict 1
//
@@ -369,31 +366,26 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
return PredictionModegetSingleViableAlt(altsets)
}
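A worked example of the alt-subset reasoning above, inside the antlr package (the BitSet helpers are unexported); the subsets are illustrative only:

func exampleAltSubsets() {
	s1 := NewBitSet()
	s1.add(1)
	s1.add(2)
	s2 := NewBitSet()
	s2.add(1)
	s3 := NewBitSet()
	s3.add(1)
	s3.add(2)
	altsets := []*BitSet{s1, s2, s3}
	// s2 holds a single alternative, so not every subset conflicts and SLL keeps going:
	conflict := PredictionModeallSubsetsConflict(altsets) // false
	// all subsets still agree on alternative 1, so full LL can stop and predict 1:
	alt := PredictionModeresolvesToJustOneViableAlt(altsets) // 1
	_, _ = conflict, alt
}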
-//
// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
return !PredictionModehasNonConflictingAltSet(altsets)
}
-//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
@@ -404,14 +396,12 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
return false
}
-//
// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
@@ -422,13 +412,11 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
return false
}
-//
// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
-//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
var first *BitSet
@@ -444,13 +432,11 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
return true
}
-//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
-//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
all := PredictionModeGetAlts(altsets)
if all.length() == 1 {
@@ -466,7 +452,6 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int {
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
-//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
all := NewBitSet()
for _, alts := range altsets {
@@ -475,44 +460,35 @@ func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
return all
}
-//
-// This func gets the conflicting alt subsets from a configuration set.
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
//
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
//
-//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := make(map[int]*BitSet)
+ configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
for _, c := range configs.GetItems() {
- key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
- alts, ok := configToAlts[key]
+ alts, ok := configToAlts.Get(c)
if !ok {
alts = NewBitSet()
- configToAlts[key] = alts
+ configToAlts.Put(c, alts)
}
alts.add(c.GetAlt())
}
- values := make([]*BitSet, 0, 10)
- for _, v := range configToAlts {
- values = append(values, v)
- }
- return values
+ return configToAlts.Values()
}
-//
-// Get a map from state to alt subset from a configuration set. For each
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
//
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
//
-//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
m := NewAltDict()
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
similarity index 92%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
index 93efcf355d8b..bfe542d09141 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.10.1"
+ runtimeVersion := "4.12.0"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
@@ -108,7 +108,6 @@ func (b *BaseRecognizer) SetState(v int) {
// Get a map from rule names to rule indexes.
//
// Used for XPath and tree pattern compilation.
-//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
@@ -171,18 +170,18 @@ func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
}
// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
+//
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
similarity index 97%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
index 600cf8c0625c..210699ba2341 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
similarity index 85%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
index 9ada430779c1..a702e99def76 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -18,12 +18,12 @@ import (
//
type SemanticContext interface {
- comparable
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
evaluate(parser Recognizer, outerContext RuleContext) bool
evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
- hash() int
String() string
}
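The interface now exposes Equals and Hash so semantic contexts can live in the generic JStore that replaces array2DHashSet below. A hedged, package-internal sketch of what that buys (values are made up):

func examplePredicateDedup() {
	ops := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
	ops.Put(NewPredicate(3, 0, false))
	ops.Put(NewPredicate(3, 0, false)) // same Hash and Equals as the first entry
	// the store dedupes via the comparator, so Values() holds a single predicate
	_ = ops.Values()
}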
@@ -78,7 +78,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
//The default {@link SemanticContext}, which is semantically equivalent to
//a predicate of the form {@code {true}?}.
-var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
+var SemanticContextNone = NewPredicate(-1, -1, false)
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
@@ -95,7 +95,7 @@ func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}
-func (p *Predicate) equals(other interface{}) bool {
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
if p == other {
return true
} else if _, ok := other.(*Predicate); !ok {
@@ -107,7 +107,7 @@ func (p *Predicate) equals(other interface{}) bool {
}
}
-func (p *Predicate) hash() int {
+func (p *Predicate) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, p.ruleIndex)
h = murmurUpdate(h, p.predIndex)
@@ -151,17 +151,22 @@ func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
return p.precedence - other.precedence
}
-func (p *PrecedencePredicate) equals(other interface{}) bool {
- if p == other {
- return true
- } else if _, ok := other.(*PrecedencePredicate); !ok {
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
return false
- } else {
- return p.precedence == other.(*PrecedencePredicate).precedence
}
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
}
-func (p *PrecedencePredicate) hash() int {
+func (p *PrecedencePredicate) Hash() int {
h := uint32(1)
h = 31*h + uint32(p.precedence)
return int(h)
@@ -171,10 +176,10 @@ func (p *PrecedencePredicate) String() string {
return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}
-func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
result := make([]*PrecedencePredicate, 0)
- set.Each(func(v interface{}) bool {
+ set.Each(func(v SemanticContext) bool {
if c2, ok := v.(*PrecedencePredicate); ok {
result = append(result, c2)
}
@@ -193,21 +198,21 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := newArray2DHashSet(nil, nil)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(a)
+ operands.Put(a)
}
if ba, ok := b.(*AND); ok {
for _, o := range ba.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(b)
+ operands.Put(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
@@ -220,7 +225,7 @@ func NewAND(a, b SemanticContext) *AND {
}
}
- operands.Add(reduced)
+ operands.Put(reduced)
}
vs := operands.Values()
@@ -235,14 +240,15 @@ func NewAND(a, b SemanticContext) *AND {
return and
}
-func (a *AND) equals(other interface{}) bool {
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
if a == other {
return true
- } else if _, ok := other.(*AND); !ok {
+ }
+ if _, ok := other.(*AND); !ok {
return false
} else {
for i, v := range other.(*AND).opnds {
- if !a.opnds[i].equals(v) {
+ if !a.opnds[i].Equals(v) {
return false
}
}
@@ -250,13 +256,11 @@ func (a *AND) equals(other interface{}) bool {
}
}
-//
// {@inheritDoc}
//
//
// The evaluation of predicates by a context is short-circuiting, but
// unordered.
-//
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(a.opnds); i++ {
if !a.opnds[i].evaluate(parser, outerContext) {
@@ -304,18 +308,18 @@ func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) Semant
return result
}
-func (a *AND) hash() int {
+func (a *AND) Hash() int {
h := murmurInit(37) // Init with a value different from OR
for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
+ h = murmurUpdate(h, op.Hash())
}
return murmurFinish(h, len(a.opnds))
}
-func (a *OR) hash() int {
+func (a *OR) Hash() int {
h := murmurInit(41) // Init with a value different from AND
for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
+ h = murmurUpdate(h, op.Hash())
}
return murmurFinish(h, len(a.opnds))
}
@@ -345,21 +349,21 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := newArray2DHashSet(nil, nil)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(a)
+ operands.Put(a)
}
if ba, ok := b.(*OR); ok {
for _, o := range ba.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(b)
+ operands.Put(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
@@ -372,7 +376,7 @@ func NewOR(a, b SemanticContext) *OR {
}
}
- operands.Add(reduced)
+ operands.Put(reduced)
}
vs := operands.Values()
@@ -388,14 +392,14 @@ func NewOR(a, b SemanticContext) *OR {
return o
}
-func (o *OR) equals(other interface{}) bool {
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
if o == other {
return true
} else if _, ok := other.(*OR); !ok {
return false
} else {
for i, v := range other.(*OR).opnds {
- if !o.opnds[i].equals(v) {
+ if !o.opnds[i].Equals(v) {
return false
}
}
@@ -406,7 +410,6 @@ func (o *OR) equals(other interface{}) bool {
//
// The evaluation of predicates by o context is short-circuiting, but
// unordered.
-//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(o.opnds); i++ {
if o.opnds[i].evaluate(parser, outerContext) {
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
similarity index 98%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
index 2d8e99095d32..f73b06bc6a05 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -158,7 +158,6 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
// {@link Token//GetInputStream}.
//
// @param oldToken The token to copy.
-//
func (c *CommonToken) clone() *CommonToken {
t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
t.tokenIndex = c.GetTokenIndex()
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
similarity index 85%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
index e023978fef4d..a3f36eaa67f9 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
similarity index 87%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
index df92c8147897..1527d43f608b 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
similarity index 58%
rename from cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
rename to cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
index 96a03f02aa6b..b3e38af34454 100644
--- a/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
+++ b/cluster-autoscaler/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -1,15 +1,15 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
+
package antlr
import (
-"bytes"
-"fmt"
+ "bytes"
+ "fmt"
)
-
-//
+//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.
@@ -85,12 +85,10 @@ import (
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.
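A minimal usage sketch against the API below; "lexer" stands for any generated lexer, the token indexes are illustrative, and the stream is assumed to hold enough tokens:

func exampleRewrite(lexer Lexer) string {
	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
	tokens.Fill()
	rewriter := NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// header\n")
	rewriter.ReplaceDefaultPos(3, "renamed")
	rewriter.DeleteDefault(5, 6)
	return rewriter.GetTextDefault() // the original text with the edits applied
}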
-
-
-const(
+const (
Default_Program_Name = "default"
- Program_Init_Size = 100
- Min_Token_Index = 0
+ Program_Init_Size = 100
+ Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
@@ -98,13 +96,13 @@ const(
type RewriteOperation interface {
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
- Execute(buffer *bytes.Buffer) int
- String() string
- GetInstructionIndex() int
- GetIndex() int
- GetText() string
- GetOpName() string
- GetTokens() TokenStream
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
SetInstructionIndex(val int)
SetIndex(int)
SetText(string)
@@ -114,63 +112,62 @@ type RewriteOperation interface {
type BaseRewriteOperation struct {
//Current index of rewrites list
- instruction_index int
+ instruction_index int
//Token buffer index
- index int
+ index int
//Substitution text
- text string
+ text string
//Actual operation name
- op_name string
+ op_name string
//Pointer to token steam
- tokens TokenStream
+ tokens TokenStream
}
-func (op *BaseRewriteOperation)GetInstructionIndex() int{
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
return op.instruction_index
}
-func (op *BaseRewriteOperation)GetIndex() int{
+func (op *BaseRewriteOperation) GetIndex() int {
return op.index
}
-func (op *BaseRewriteOperation)GetText() string{
+func (op *BaseRewriteOperation) GetText() string {
return op.text
}
-func (op *BaseRewriteOperation)GetOpName() string{
+func (op *BaseRewriteOperation) GetOpName() string {
return op.op_name
}
-func (op *BaseRewriteOperation)GetTokens() TokenStream{
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
return op.tokens
}
-func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
op.instruction_index = val
}
-func (op *BaseRewriteOperation)SetIndex(val int) {
+func (op *BaseRewriteOperation) SetIndex(val int) {
op.index = val
}
-func (op *BaseRewriteOperation)SetText(val string){
+func (op *BaseRewriteOperation) SetText(val string) {
op.text = val
}
-func (op *BaseRewriteOperation)SetOpName(val string){
+func (op *BaseRewriteOperation) SetOpName(val string) {
op.op_name = val
}
-func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
op.tokens = val
}
-
-func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
return op.index
}
-func (op *BaseRewriteOperation) String() string {
+func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
op.op_name,
op.tokens.Get(op.GetIndex()),
@@ -179,26 +176,25 @@ func (op *BaseRewriteOperation) String() string {
}
-
type InsertBeforeOp struct {
BaseRewriteOperation
}
-func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
- return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index,
- text:text,
- op_name:"InsertBeforeOp",
- tokens:stream,
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ op_name: "InsertBeforeOp",
+ tokens: stream,
}}
}
-func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
- return op.index+1
+ return op.index + 1
}
func (op *InsertBeforeOp) String() string {
@@ -213,20 +209,20 @@ type InsertAfterOp struct {
BaseRewriteOperation
}
-func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
- return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index+1,
- text:text,
- tokens:stream,
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+ return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ tokens: stream,
}}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
- return op.index+1
+ return op.index + 1
}
func (op *InsertAfterOp) String() string {
@@ -235,28 +231,28 @@ func (op *InsertAfterOp) String() string {
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
-type ReplaceOp struct{
+type ReplaceOp struct {
BaseRewriteOperation
LastIndex int
}
-func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
return &ReplaceOp{
- BaseRewriteOperation:BaseRewriteOperation{
- index:from,
- text:text,
- op_name:"ReplaceOp",
- tokens:stream,
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ op_name: "ReplaceOp",
+ tokens: stream,
},
- LastIndex:to,
+ LastIndex: to,
}
}
-func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
- if op.text != ""{
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
buffer.WriteString(op.text)
}
- return op.LastIndex +1
+ return op.LastIndex + 1
}
func (op *ReplaceOp) String() string {
@@ -268,54 +264,54 @@ func (op *ReplaceOp) String() string {
op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}
-
type TokenStreamRewriter struct {
//Our source stream
- tokens TokenStream
+ tokens TokenStream
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- last_rewrite_token_indexes map[string]int
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
}
-func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
return &TokenStreamRewriter{
- tokens: tokens,
- programs: map[string][]RewriteOperation{
- Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
},
- last_rewrite_token_indexes: map[string]int{},
+ last_rewrite_token_indexes: map[string]int{},
}
}
-func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
return tsr.tokens
}
-// Rollback the instruction stream for a program so that
-// the indicated instruction (via instructionIndex) is no
-// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
- is, ok := tsr.programs[program_name]
- if ok{
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+ is, ok := tsr.programs[program_name]
+ if ok {
tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
-func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
tsr.Rollback(Default_Program_Name, instruction_index)
}
-//Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
-func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
tsr.DeleteProgram(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
@@ -323,31 +319,31 @@ func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
tsr.InsertAfter(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
-func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
tsr.InsertBefore(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
- if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
@@ -357,207 +353,216 @@ func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
tsr.Replace(Default_Program_Name, from, to, text)
}
-func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
tsr.ReplaceDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
-func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
tsr.ReplaceTokenDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
- tsr.Replace(program_name, from, to, "" )
+func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
+ tsr.Replace(program_name, from, to, "")
}
-func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
tsr.Delete(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
- tsr.DeleteDefault(index,index)
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
}
-func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
tsr.ReplaceToken(program_name, from, to, "")
}
-func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
tsr.DeleteToken(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
i, ok := tsr.last_rewrite_token_indexes[program_name]
- if !ok{
+ if !ok {
return -1
}
return i
}
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
tsr.last_rewrite_token_indexes[program_name] = i
}
-func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
-func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
is := tsr.GetProgram(name)
is = append(is, op)
tsr.programs[name] = is
}
-func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
is, ok := tsr.programs[name]
- if !ok{
+ if !ok {
is = tsr.InitializeProgram(name)
}
return is
}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetTextDefault() string{
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
return tsr.GetText(
Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
rewrites := tsr.programs[program_name]
start := interval.Start
- stop := interval.Stop
+ stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
- start = max(start,0)
- if rewrites == nil || len(rewrites) == 0{
+ start = max(start, 0)
+ if rewrites == nil || len(rewrites) == 0 {
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
// First, optimize instruction stream
indexToOp := reduceToSingleOperationPerIndex(rewrites)
// Walk buffer, executing instructions and emitting tokens
- for i:=start; i<=stop && i<tsr.tokens.Size();{
- if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
}
}
return buf.String()
}
-// We need to combine operations and report invalid operations (like
-// overlapping replaces that are not completed nested). Inserts to
-// same index need to be combined etc... Here are the cases:
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
//
-// I.i.u I.j.v leave alone, nonoverlapping
-// I.i.u I.i.v combine: Iivu
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
//
-// R.i-j.u R.x-y.v | i-j in x-y delete first R
-// R.i-j.u R.i-j.v delete first R
-// R.i-j.u R.x-y.v | x-y in i-j ERROR
-// R.i-j.u R.x-y.v | boundaries overlap ERROR
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
//
-// Delete special case of replace (text==null):
-// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
-// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
-// R.x-y.v I.i.u | i in x-y ERROR
-// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
-// I.i.u = insert u before op @ index i
-// R.x-y.u = replace x-y indexed tokens with u
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
//
-// First we need to examine replaces. For any replace op:
+// First we need to examine replaces. For any replace op:
//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
//
-// Then we can deal with inserts:
+// Then we can deal with inserts:
//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this replace.
-// 3. throw exception if index in same range as previous replace
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the replace range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
//
-// Return a map from token index to operation.
-//
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+// Return a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
// WALK REPLACES
- for i:=0; i < len(rewrites); i++{
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil{continue}
+ if op == nil {
+ continue
+ }
rop, ok := op.(*ReplaceOp)
- if !ok{continue}
+ if !ok {
+ continue
+ }
// Wipe prior inserts within range
- for j:=0; j<i && j < len(rewrites); j++{
- }else if iop.index > rop.index && iop.index <=rop.LastIndex{
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
// delete insert as it's a no-op.
rewrites[iop.instruction_index] = nil
}
}
}
// Drop any prior replaces contained within
- for j:=0; j<i && j < len(rewrites); j++{
- if prevop, ok := rewrites[j].(*ReplaceOp); ok{
- if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
// delete replace as it's a no-op.
rewrites[prevop.instruction_index] = nil
continue
@@ -566,61 +571,67 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- if prevop.text == "" && rop.text == "" && !disjoint{
+ if prevop.text == "" && rop.text == "" && !disjoint {
rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
- }else if !disjoint{
+ } else if !disjoint {
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
}
}
}
// WALK INSERTS
- for i:=0; i < len(rewrites); i++ {
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil{continue}
+ if op == nil {
+ continue
+ }
//hack to replicate inheritance in composition
_, iok := rewrites[i].(*InsertBeforeOp)
_, aok := rewrites[i].(*InsertAfterOp)
- if !iok && !aok{continue}
+ if !iok && !aok {
+ continue
+ }
iop := rewrites[i]
// combine current insert with prior if any at same index
// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
- for j:=0; j<i && j < len(rewrites); j++{
- if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
- panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
}
}
}
}
m := map[int]RewriteOperation{}
- for i:=0; i < len(rewrites); i++{
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil {continue}
- if _, ok := m[op.GetIndex()]; ok{
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
panic("should only be one op per index")
}
m[op.GetIndex()] = op
@@ -628,22 +639,21 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
return m
}
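A hedged, package-internal illustration of the combining rules spelled out in the comment above this function; "tokens" is any TokenStream and the indexes and text are made up:

func exampleReduce(tokens TokenStream) map[int]RewriteOperation {
	var rewrites []RewriteOperation
	rewrites = append(rewrites, NewInsertBeforeOp(2, "a", tokens))
	rewrites = append(rewrites, NewInsertBeforeOp(2, "b", tokens)) // same index: combined into "ba"
	rewrites = append(rewrites, NewInsertBeforeOp(6, "c", tokens)) // inside the replace below: wiped
	rewrites = append(rewrites, NewReplaceOp(5, 7, "x", tokens))
	for i, op := range rewrites {
		op.SetInstructionIndex(i) // normally AddToProgram assigns these
	}
	// After reduction there is one op per index: a combined InsertBeforeOp ("ba")
	// at index 2 and the ReplaceOp covering 5..7.
	return reduceToSingleOperationPerIndex(rewrites)
}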
-
/*
Quick fixing Go lack of overloads
- */
+*/
-func max(a,b int)int{
- if a>b{
+func max(a, b int) int {
+ if a > b {
return a
- }else {
+ } else {
return b
}
}
-func min(a,b int)int{
- if a<b{
 if as.n > as.threshold {
as.expand()
}
@@ -98,7 +96,7 @@ func (as *array2DHashSet) expand() {
b := as.getBuckets(o)
bucketLength := newBucketLengths[b]
- var newBucket []interface{}
+ var newBucket []Collectable[any]
if bucketLength == 0 {
// new bucket
newBucket = as.createBucket(as.initialBucketCapacity)
@@ -107,7 +105,7 @@ func (as *array2DHashSet) expand() {
newBucket = newTable[b]
if bucketLength == len(newBucket) {
// expand
- newBucketCopy := make([]interface{}, len(newBucket)<<1)
+ newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
copy(newBucketCopy[:bucketLength], newBucket)
newBucket = newBucketCopy
newTable[b] = newBucket
@@ -124,7 +122,7 @@ func (as *array2DHashSet) Len() int {
return as.n
}
-func (as *array2DHashSet) Get(o interface{}) interface{} {
+func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
if o == nil {
return nil
}
@@ -147,7 +145,7 @@ func (as *array2DHashSet) Get(o interface{}) interface{} {
return nil
}
-func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
+func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
b := as.getBuckets(o)
bucket := as.buckets[b]
@@ -178,7 +176,7 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
// full bucket, expand and add to end
oldLength := len(bucket)
- bucketCopy := make([]interface{}, oldLength<<1)
+ bucketCopy := make([]Collectable[any], oldLength<<1)
copy(bucketCopy[:oldLength], bucket)
bucket = bucketCopy
as.buckets[b] = bucket
@@ -187,22 +185,22 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
return o
}
-func (as *array2DHashSet) getBuckets(value interface{}) int {
+func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
hash := as.hashcodeFunction(value)
return hash & (len(as.buckets) - 1)
}
-func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
- return make([][]interface{}, cap)
+func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
+ return make([][]Collectable[any], cap)
}
-func (as *array2DHashSet) createBucket(cap int) []interface{} {
- return make([]interface{}, cap)
+func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
+ return make([]Collectable[any], cap)
}
func newArray2DHashSetWithCap(
hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
initCap int,
initBucketCap int,
) *array2DHashSet {
@@ -231,7 +229,7 @@ func newArray2DHashSetWithCap(
func newArray2DHashSet(
hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
) *array2DHashSet {
return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.gitignore b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.gitignore
new file mode 100644
index 000000000000..8d69a9418aa3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.gitignore
@@ -0,0 +1,15 @@
+bin/
+.idea/
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 000000000000..bb83c6670df6
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+dist: xenial
+go:
+ - '1.10'
+ - '1.11'
+ - '1.12'
+ - '1.13'
+ - 'tip'
+
+script:
+ - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..4b462b0d81b1
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Contributor Code of Conduct
+
+This project adheres to [The Code Manifesto](http://codemanifesto.com)
+as its guidelines for contributor interactions.
+
+## The Code Manifesto
+
+We want to work in an ecosystem that empowers developers to reach their
+potential — one that encourages growth and effective collaboration. A space
+that is safe for all.
+
+A space such as this benefits everyone that participates in it. It encourages
+new developers to enter our field. It is through discussion and collaboration
+that we grow, and through growth that we improve.
+
+In the effort to create such a place, we hold to these values:
+
+1. **Discrimination limits us.** This includes discrimination on the basis of
+ race, gender, sexual orientation, gender identity, age, nationality,
+ technology and any other arbitrary exclusion of a group of people.
+2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
+ levels. Remember that, and if brought to your attention, heed it.
+3. **We are our biggest assets.** None of us were born masters of our trade.
+ Each of us has been helped along the way. Return that favor, when and where
+ you can.
+4. **We are resources for the future.** As an extension of #3, share what you
+ know. Make yourself a resource to help those that come after you.
+5. **Respect defines us.** Treat others as you wish to be treated. Make your
+ discussions, criticisms and debates from a position of respectfulness. Ask
+ yourself, is it true? Is it necessary? Is it constructive? Anything less is
+ unacceptable.
+6. **Reactions require grace.** Angry responses are valid, but abusive language
+ and vindictive actions are toxic. When something happens that offends you,
+ handle it assertively, but be respectful. Escalate reasonably, and try to
+ allow the offender an opportunity to explain themselves, and possibly
+ correct the issue.
+7. **Opinions are just that: opinions.** Each and every one of us, due to our
+ background and upbringing, have varying opinions. That is perfectly
+ acceptable. Remember this: if you respect your own opinions, you should
+ respect the opinions of others.
+8. **To err is human.** You might not intend it, but mistakes do happen and
+ contribute to build experience. Tolerate honest mistakes, and don't
+ hesitate to apologize if you make one yourself.
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 000000000000..7ed268a1edd9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, here are some features and functions that still need work
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 000000000000..cacba9102400
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2020 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 000000000000..2c3fc35eb644
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,622 @@
+govalidator
+===========
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator)
+[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you are unhappy with typing the long `govalidator` name, you can alias the import like this:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors. A short sketch of this toggle follows the struct examples below.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
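+
+`SetNilPtrAllowedByRequired` can be sketched the same way. Assuming the illustrative struct below, a nil pointer field marked `required` passes only while the setting is enabled:
+```go
+type person struct {
+	Middle *string `valid:"required"`
+}
+
+govalidator.SetNilPtrAllowedByRequired(true)
+ok, _ := govalidator.ValidateStruct(person{}) // nil *string is accepted for "required"
+println(ok) // true
+
+govalidator.SetNilPtrAllowedByRequired(false)
+ok, _ = govalidator.ValidateStruct(person{}) // nil *string now fails "required"
+println(ok) // false
+```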
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs this is the object being validated, which makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
+ // ...
+}
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
+ // ...
+})
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func ErrorsByField(e error) map[string]string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func HasLowerCase(str string) bool
+func HasUpperCase(str string) bool
+func HasWhitespace(str string) bool
+func HasWhitespaceOnly(str string) bool
+func InRange(value interface{}, left interface{}, right interface{}) bool
+func InRangeFloat32(value, left, right float32) bool
+func InRangeFloat64(value, left, right float64) bool
+func InRangeInt(value, left, right interface{}) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCIDR(str string) bool
+func IsCRC32(str string) bool
+func IsCRC32b(str string) bool
+func IsCreditCard(str string) bool
+func IsDNSName(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsExistingEmail(email string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHash(str string, algorithm string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsHost(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsISO4217(str string) bool
+func IsISO693Alpha2(str string) bool
+func IsISO693Alpha3b(str string) bool
+func IsIn(str string, params ...string) bool
+func IsInRaw(str string, params ...string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMD4(str string) bool
+func IsMD5(str string) bool
+func IsMagnetURI(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNotNull(str string) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRFC3339(str string) bool
+func IsRFC3339WithoutZone(str string) bool
+func IsRGBcolor(str string) bool
+func IsRegex(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsRipeMD128(str string) bool
+func IsRipeMD160(str string) bool
+func IsRsaPub(str string, params ...string) bool
+func IsRsaPublicKey(str string, keylen int) bool
+func IsSHA1(str string) bool
+func IsSHA256(str string) bool
+func IsSHA384(str string) bool
+func IsSHA512(str string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsTiger128(str string) bool
+func IsTiger160(str string) bool
+func IsTiger192(str string) bool
+func IsTime(str string, format string) bool
+func IsType(v interface{}, params ...string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsULID(str string) bool
+func IsUnixTime(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func MaxStringLength(str string, params ...string) bool
+func MinStringLength(str string, params ...string) bool
+func NormalizeEmail(str string) (string, error)
+func PadBoth(str string, padStr string, padLen int) string
+func PadLeft(str string, padStr string, padLen int) string
+func PadRight(str string, padStr string, padLen int) string
+func PrependPathToErrors(err error, path string) error
+func Range(str string, params ...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func SetNilPtrAllowedByRequired(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (res int64, err error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func TruncatingErrorf(str string, args ...interface{}) error
+func UnderscoreToCamelCase(s string) string
+func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type ISO693Entry
+type InterfaceParamValidator
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### IsType
+```go
+println(govalidator.IsType("Bob", "string"))
+println(govalidator.IsType(1, "int"))
+i := 1
+println(govalidator.IsType(&i, "*int"))
+```
+
+IsType can be used through the tag `type` which is essential for map validation:
+```go
+type User struct {
+ Name string `valid:"type(string)"`
+ Age int `valid:"type(int)"`
+ Meta interface{} `valid:"type(string)"`
+}
+result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ToString
+```go
+type User struct {
+ FirstName string
+ LastName string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+ println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+ return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+ return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. All validators applied to a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator - used function):
+```go
+"email": IsEmail,
+"url": IsURL,
+"dialstring": IsDialString,
+"requrl": IsRequestURL,
+"requri": IsRequestURI,
+"alpha": IsAlpha,
+"utfletter": IsUTFLetter,
+"alphanum": IsAlphanumeric,
+"utfletternum": IsUTFLetterNumeric,
+"numeric": IsNumeric,
+"utfnumeric": IsUTFNumeric,
+"utfdigit": IsUTFDigit,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"rgbcolor": IsRGBcolor,
+"lowercase": IsLowerCase,
+"uppercase": IsUpperCase,
+"int": IsInt,
+"float": IsFloat,
+"null": IsNull,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"creditcard": IsCreditCard,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"multibyte": IsMultibyte,
+"ascii": IsASCII,
+"printableascii": IsPrintableASCII,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"variablewidth": IsVariableWidth,
+"base64": IsBase64,
+"datauri": IsDataURI,
+"ip": IsIP,
+"port": IsPort,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"dns": IsDNSName,
+"host": IsHost,
+"mac": IsMAC,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"ssn": IsSSN,
+"semver": IsSemver,
+"rfc3339": IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2": IsISO3166Alpha2,
+"ISO3166Alpha3": IsISO3166Alpha3,
+"ulid": IsULID,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)" : IsRsaPub,
+"minstringlength(int): MinStringLength,
+"maxstringlength(int): MaxStringLength,
+```
+Validators with parameters for any type
+
+```go
+"type(type)": IsType,
+```
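+
+For instance, several of the parameterized tags above can be combined directly in struct tags (the struct and field names here are purely illustrative):
+```go
+type Signup struct {
+	Username string `valid:"stringlength(3|20),required"`
+	Country  string `valid:"in(DE|FR|PL)"`
+	Bio      string `valid:"maxstringlength(280),optional"`
+}
+
+ok, err := govalidator.ValidateStruct(Signup{Username: "ab", Country: "US"})
+if err != nil {
+	println("error: " + err.Error()) // reports the stringlength and in violations
+}
+println(ok) // false
+```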
+
+And here is a small example of usage:
+```go
+type Post struct {
+ Title string `valid:"alphanum,required"`
+ Message string `valid:"duck,ascii"`
+ Message2 string `valid:"animal(dog)"`
+ AuthorIP string `valid:"ipv4"`
+ Date string `valid:"-"`
+}
+post := &Post{
+ Title: "My Example Post",
+ Message: "duck",
+ Message2: "dog",
+ AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+ species := params[0]
+ return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)
+If you want to validate maps, pass the map to be validated together with a validation map that contains the same tags used in ValidateStruct; both maps have to be in the form `map[string]interface{}`.
+
+So here is a small example of usage:
+```go
+var mapTemplate = map[string]interface{}{
+ "name":"required,alpha",
+ "family":"required,alpha",
+ "email":"required,email",
+ "cell-phone":"numeric",
+ "address":map[string]interface{}{
+ "line1":"required,alphanum",
+ "line2":"alphanum",
+ "postal-code":"numeric",
+ },
+}
+
+var inputMap = map[string]interface{}{
+ "name":"Bob",
+ "family":"Smith",
+ "email":"foo@bar.baz",
+ "address":map[string]interface{}{
+ "line1":"",
+ "line2":"",
+ "postal-code":"",
+ },
+}
+
+result, err := govalidator.ValidateMap(inputMap, mapTemplate)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+
+###### WhiteList
+```go
+// Remove all characters from string ignoring characters between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
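+
+The complementary `BlackList` helper from the function list above removes the matched characters instead of keeping them; a small sketch:
+```go
+// Strip everything between "a" and "z", keeping the rest
+println(govalidator.BlackList("abc123", "a-z") == "123")
+```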
+
+###### Custom validation functions
+Custom validation using your own domain specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+ ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+ Email string `valid:"email"`
+ CustomMinLength int `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // you can type switch on the context interface being validated
+ case StructWithCustomByteArray:
+ // you can check and validate against some other field in the context,
+ // return early or not validate against the context at all – your choice
+ case SomeOtherType:
+ // ...
+ default:
+ // expecting some other type? Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+})
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+})
+```
+
+###### Loop over Error()
+By default, .Error() returns all errors in a single string. To access each error individually, you can do this:
+```go
+ if err != nil {
+ errs := err.(govalidator.Errors).Errors()
+ for _, e := range errs {
+ fmt.Println(e.Error())
+ }
+ }
+```
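+
+If you prefer a field-indexed view, `ErrorsByField` from the function list above returns a map from field name to message (a sketch, assuming `err` comes from `ValidateStruct`):
+```go
+	if err != nil {
+		for field, msg := range govalidator.ErrorsByField(err) {
+			fmt.Println(field + ": " + msg)
+		}
+	}
+```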
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+ Id int64 `json:"id"`
+ FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
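+
+A quick sketch of how the custom message surfaces (the `Ticket` value here is only illustrative):
+```go
+_, err := govalidator.ValidateStruct(Ticket{Id: 1})
+if err != nil {
+	println(err.Error()) // prints "First name is blank" rather than the default message
+}
+```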
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, here are some features and functions that still need work
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 000000000000..3e1da7cb480e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,87 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index and returns a result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// ReduceIterator is a function that accepts two elements of a slice/array and returns the result of merging those values
+type ReduceIterator func(interface{}, interface{}) interface{}
+
+// Some validates that any item of array corresponds to ConditionIterator. Returns boolean.
+func Some(array []interface{}, iterator ConditionIterator) bool {
+ res := false
+ for index, data := range array {
+ res = res || iterator(data, index)
+ }
+ return res
+}
+
+// Every validates that every item of array corresponds to ConditionIterator. Returns boolean.
+func Every(array []interface{}, iterator ConditionIterator) bool {
+ res := true
+ for index, data := range array {
+ res = res && iterator(data, index)
+ }
+ return res
+}
+
+// Reduce boils down a list of values into a single value by ReduceIterator
+func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
+ for _, data := range array {
+ initialValue = iterator(initialValue, data)
+ }
+ return initialValue
+}
+
+// Each iterates over the slice and applies the Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+ for index, data := range array {
+ iterator(data, index)
+ }
+}
+
+// Map iterates over the slice and applies the ResultIterator to every item. Returns a new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+ var result = make([]interface{}, len(array))
+ for index, data := range array {
+ result[index] = iterator(data, index)
+ }
+ return result
+}
+
+// Find iterates over the slice and applies the ConditionIterator to every item. Returns the first item that meets the ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+ for index, data := range array {
+ if iterator(data, index) {
+ return data
+ }
+ }
+ return nil
+}
+
+// Filter iterates over the slice and applies the ConditionIterator to every item. Returns a new slice.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+ var result = make([]interface{}, 0)
+ for index, data := range array {
+ if iterator(data, index) {
+ result = append(result, data)
+ }
+ }
+ return result
+}
+
+// Count iterates over the slice and applies the ConditionIterator to every item. Returns the count of items that meet the ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+ count := 0
+ for index, data := range array {
+ if iterator(data, index) {
+ count = count + 1
+ }
+ }
+ return count
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 000000000000..d68e990fc256
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,81 @@
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+ res := fmt.Sprintf("%v", obj)
+ return res
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+ res, err := json.Marshal(obj)
+ if err != nil {
+ res = []byte("")
+ }
+ return string(res), err
+}
+
+// ToFloat converts the input (string or any numeric type) to a float64, or 0.0 and an error if the input cannot be converted.
+func ToFloat(value interface{}) (res float64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = float64(val.Int())
+ case uint, uint8, uint16, uint32, uint64:
+ res = float64(val.Uint())
+ case float32, float64:
+ res = val.Float()
+ case string:
+ res, err = strconv.ParseFloat(val.String(), 64)
+ if err != nil {
+ res = 0
+ }
+ default:
+		err = fmt.Errorf("ToFloat: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToInt converts the input string or any numeric type to an int64, or 0 and an error if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = val.Int()
+ case uint, uint8, uint16, uint32, uint64:
+ res = int64(val.Uint())
+ case float32, float64:
+ res = int64(val.Float())
+ case string:
+ if IsInt(val.String()) {
+ res, err = strconv.ParseInt(val.String(), 0, 64)
+ if err != nil {
+ res = 0
+ }
+ } else {
+			err = fmt.Errorf("ToInt: invalid numeric format %v", value)
+ res = 0
+ }
+ default:
+ err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+ return strconv.ParseBool(str)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/doc.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/doc.go
new file mode 100644
index 000000000000..55dce62dc8c3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/doc.go
@@ -0,0 +1,3 @@
+package govalidator
+
+// A package of validators and sanitizers for strings, structures and collections.
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 000000000000..1da2336f47ee
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,47 @@
+package govalidator
+
+import (
+ "sort"
+ "strings"
+)
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+ return es
+}
+
+func (es Errors) Error() string {
+ var errs []string
+ for _, e := range es {
+ errs = append(errs, e.Error())
+ }
+ sort.Strings(errs)
+ return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+ Name string
+ Err error
+ CustomErrorMessageExists bool
+
+ // Validator indicates the name of the validator that failed
+ Validator string
+ Path []string
+}
+
+func (e Error) Error() string {
+ if e.CustomErrorMessageExists {
+ return e.Err.Error()
+ }
+
+ errName := e.Name
+ if len(e.Path) > 0 {
+ errName = strings.Join(append(e.Path, e.Name), ".")
+ }
+
+ return errName + ": " + e.Err.Error()
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 000000000000..5041d9e86844
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,100 @@
+package govalidator
+
+import (
+ "math"
+)
+
+// Abs returns absolute value of number
+func Abs(value float64) float64 {
+ return math.Abs(value)
+}
+
+// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
+func Sign(value float64) float64 {
+ if value > 0 {
+ return 1
+ } else if value < 0 {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+ return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+ return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+ return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+ return value <= 0
+}
+
+// InRangeInt returns true if value lies between left and right border
+func InRangeInt(value, left, right interface{}) bool {
+ value64, _ := ToInt(value)
+ left64, _ := ToInt(left)
+ right64, _ := ToInt(right)
+ if left64 > right64 {
+ left64, right64 = right64, left64
+ }
+ return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border
+func InRangeFloat32(value, left, right float32) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border
+func InRangeFloat64(value, left, right float64) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRange returns true if value lies between the left and right borders; it is generic and handles int, float32, float64 and string.
+// All three arguments must be of the same type.
+// Returns false if the value doesn't lie in the range, or if the values are incompatible or not comparable.
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+ switch value.(type) {
+ case int:
+ intValue, _ := ToInt(value)
+ intLeft, _ := ToInt(left)
+ intRight, _ := ToInt(right)
+ return InRangeInt(intValue, intLeft, intRight)
+ case float32, float64:
+ intValue, _ := ToFloat(value)
+ intLeft, _ := ToFloat(left)
+ intRight, _ := ToFloat(right)
+ return InRangeFloat64(intValue, intLeft, intRight)
+ case string:
+ return value.(string) >= left.(string) && value.(string) <= right.(string)
+ default:
+ return false
+ }
+}
+
+// IsWhole returns true if value is whole number
+func IsWhole(value float64) bool {
+ return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is natural number (positive and whole)
+func IsNatural(value float64) bool {
+ return IsWhole(value) && IsPositive(value)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 000000000000..bafc3765ea12
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,113 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+ Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
+ ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+ ISBN13 string = "^(?:[0-9]{13})$"
+ UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ Alpha string = "^[a-zA-Z]+$"
+ Alphanumeric string = "^[a-zA-Z0-9]+$"
+ Numeric string = "^[0-9]+$"
+ Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+ Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+ Hexadecimal string = "^[0-9a-fA-F]+$"
+ Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
+ ASCII string = "^[\x00-\x7F]+$"
+ Multibyte string = "[^\x00-\x7F]"
+ FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ PrintableASCII string = "^[\x20-\x7E]+$"
+ DataURI string = "^data:.+\\/(.+);base64$"
+ MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
+ Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
+ IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername string = `(\S+(:\S*)?@)`
+ URLPath string = `((\/|\?|#)[^\s]*)`
+ URLPort string = `(:(\d{1,5}))`
+ URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
+ URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
+ URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+ SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+ WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixPath string = `^(/[^/\x00]*)+/?$`
+ WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
+ Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
+ tagName string = "valid"
+ hasLowerCase string = ".*[[:lower:]]"
+ hasUpperCase string = ".*[[:upper:]]"
+ hasWhitespace string = ".*[[:space:]]"
+ hasWhitespaceOnly string = "^[[:space:]]+$"
+ IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
+ IMSI string = "^\\d{14,15}$"
+ E164 string = `^\+?[1-9]\d{1,14}$`
+)
+
+// Used by IsFilePath func
+const (
+ // Unknown is unresolved OS type
+ Unknown = iota
+ // Win is Windows type
+ Win
+ // Unix is *nix OS types
+ Unix
+)
+
+var (
+ userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
+ hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
+ userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
+ rxEmail = regexp.MustCompile(Email)
+ rxCreditCard = regexp.MustCompile(CreditCard)
+ rxISBN10 = regexp.MustCompile(ISBN10)
+ rxISBN13 = regexp.MustCompile(ISBN13)
+ rxUUID3 = regexp.MustCompile(UUID3)
+ rxUUID4 = regexp.MustCompile(UUID4)
+ rxUUID5 = regexp.MustCompile(UUID5)
+ rxUUID = regexp.MustCompile(UUID)
+ rxAlpha = regexp.MustCompile(Alpha)
+ rxAlphanumeric = regexp.MustCompile(Alphanumeric)
+ rxNumeric = regexp.MustCompile(Numeric)
+ rxInt = regexp.MustCompile(Int)
+ rxFloat = regexp.MustCompile(Float)
+ rxHexadecimal = regexp.MustCompile(Hexadecimal)
+ rxHexcolor = regexp.MustCompile(Hexcolor)
+ rxRGBcolor = regexp.MustCompile(RGBcolor)
+ rxASCII = regexp.MustCompile(ASCII)
+ rxPrintableASCII = regexp.MustCompile(PrintableASCII)
+ rxMultibyte = regexp.MustCompile(Multibyte)
+ rxFullWidth = regexp.MustCompile(FullWidth)
+ rxHalfWidth = regexp.MustCompile(HalfWidth)
+ rxBase64 = regexp.MustCompile(Base64)
+ rxDataURI = regexp.MustCompile(DataURI)
+ rxMagnetURI = regexp.MustCompile(MagnetURI)
+ rxLatitude = regexp.MustCompile(Latitude)
+ rxLongitude = regexp.MustCompile(Longitude)
+ rxDNSName = regexp.MustCompile(DNSName)
+ rxURL = regexp.MustCompile(URL)
+ rxSSN = regexp.MustCompile(SSN)
+ rxWinPath = regexp.MustCompile(WinPath)
+ rxUnixPath = regexp.MustCompile(UnixPath)
+ rxARWinPath = regexp.MustCompile(WinARPath)
+ rxARUnixPath = regexp.MustCompile(UnixARPath)
+ rxSemver = regexp.MustCompile(Semver)
+ rxHasLowerCase = regexp.MustCompile(hasLowerCase)
+ rxHasUpperCase = regexp.MustCompile(hasUpperCase)
+ rxHasWhitespace = regexp.MustCompile(hasWhitespace)
+ rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+ rxIMEI = regexp.MustCompile(IMEI)
+ rxIMSI = regexp.MustCompile(IMSI)
+ rxE164 = regexp.MustCompile(E164)
+)
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 000000000000..c573abb51aff
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,656 @@
+package govalidator
+
+import (
+ "reflect"
+ "regexp"
+ "sort"
+ "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+
+// InterfaceParamValidator is a wrapper for validator functions that accept an interface value and additional parameters
+type InterfaceParamValidator func(in interface{}, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+ var keys []string
+ for k := range t {
+ keys = append(keys, k)
+ }
+
+ sort.Slice(keys, func(a, b int) bool {
+ return t[keys[a]].order < t[keys[b]].order
+ })
+
+ return keys
+}
+
+type tagOption struct {
+ name string
+ customErrorMessage string
+ order int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions that accept an interface value and additional parameters
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+ "type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+ "type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+ "length": ByteLength,
+ "range": Range,
+ "runelength": RuneLength,
+ "stringlength": StringLength,
+ "matches": StringMatches,
+ "in": IsInRaw,
+ "rsapub": IsRsaPub,
+ "minstringlength": MinStringLength,
+ "maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "in": regexp.MustCompile(`^in\((.*)\)`),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+ "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+ "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
+ "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+ validators map[string]CustomTypeValidator
+
+ sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+ tm.RLock()
+ defer tm.RUnlock()
+ v, ok := tm.validators[name]
+ return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+ tm.Lock()
+ defer tm.Unlock()
+ tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
+var TagMap = map[string]Validator{
+ "email": IsEmail,
+ "url": IsURL,
+ "dialstring": IsDialString,
+ "requrl": IsRequestURL,
+ "requri": IsRequestURI,
+ "alpha": IsAlpha,
+ "utfletter": IsUTFLetter,
+ "alphanum": IsAlphanumeric,
+ "utfletternum": IsUTFLetterNumeric,
+ "numeric": IsNumeric,
+ "utfnumeric": IsUTFNumeric,
+ "utfdigit": IsUTFDigit,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHexcolor,
+ "rgbcolor": IsRGBcolor,
+ "lowercase": IsLowerCase,
+ "uppercase": IsUpperCase,
+ "int": IsInt,
+ "float": IsFloat,
+ "null": IsNull,
+ "notnull": IsNotNull,
+ "uuid": IsUUID,
+ "uuidv3": IsUUIDv3,
+ "uuidv4": IsUUIDv4,
+ "uuidv5": IsUUIDv5,
+ "creditcard": IsCreditCard,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "json": IsJSON,
+ "multibyte": IsMultibyte,
+ "ascii": IsASCII,
+ "printableascii": IsPrintableASCII,
+ "fullwidth": IsFullWidth,
+ "halfwidth": IsHalfWidth,
+ "variablewidth": IsVariableWidth,
+ "base64": IsBase64,
+ "datauri": IsDataURI,
+ "ip": IsIP,
+ "port": IsPort,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "dns": IsDNSName,
+ "host": IsHost,
+ "mac": IsMAC,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "semver": IsSemver,
+ "rfc3339": IsRFC3339,
+ "rfc3339WithoutZone": IsRFC3339WithoutZone,
+ "ISO3166Alpha2": IsISO3166Alpha2,
+ "ISO3166Alpha3": IsISO3166Alpha3,
+ "ISO4217": IsISO4217,
+ "IMEI": IsIMEI,
+ "ulid": IsULID,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+ EnglishShortName string
+ FrenchShortName string
+ Alpha2Code string
+ Alpha3Code string
+ Numeric string
+}
+
+// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+ {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+ {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+ {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+ {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+ {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+ {"Angola", "Angola (l')", "AO", "AGO", "024"},
+ {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+ {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+ {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+ {"Australia", "Australie (l')", "AU", "AUS", "036"},
+ {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+ {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+ {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+ {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+ {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+ {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+ {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+ {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+ {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+ {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+ {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+ {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+ {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+ {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+ {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+ {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+ {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+ {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+ {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+ {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+ {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+ {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+ {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+ {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+ {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+ {"Canada", "Canada (le)", "CA", "CAN", "124"},
+ {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+ {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+ {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+ {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+ {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+ {"Chile", "Chili (le)", "CL", "CHL", "152"},
+ {"China", "Chine (la)", "CN", "CHN", "156"},
+ {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+ {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
+ {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
+ {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+ {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+ {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+ {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+ {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+ {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
+ {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+ {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+ {"Cuba", "Cuba", "CU", "CUB", "192"},
+ {"Cyprus", "Chypre", "CY", "CYP", "196"},
+ {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+ {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+ {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+ {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+ {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+ {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+ {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+ {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+ {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+ {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+ {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+ {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
+ {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+ {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+ {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+ {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
+ {"France", "France (la)", "FR", "FRA", "250"},
+ {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+ {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+ {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+ {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+ {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+ {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+ {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+ {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+ {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+ {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+ {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+ {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+ {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+ {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+ {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+ {"Guam", "Guam", "GU", "GUM", "316"},
+ {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+ {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+ {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+ {"Haiti", "Haïti", "HT", "HTI", "332"},
+ {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
+ {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+ {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+ {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+ {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+ {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+ {"India", "Inde (l')", "IN", "IND", "356"},
+ {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+ {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+ {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+ {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+ {"Israel", "Israël", "IL", "ISR", "376"},
+ {"Italy", "Italie (l')", "IT", "ITA", "380"},
+ {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+ {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+ {"Japan", "Japon (le)", "JP", "JPN", "392"},
+ {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+ {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+ {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+ {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+ {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+ {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+ {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+ {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+ {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+ {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+ {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+ {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+ {"Libya", "Libye (la)", "LY", "LBY", "434"},
+ {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+ {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+ {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+ {"Macao", "Macao", "MO", "MAC", "446"},
+ {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+ {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+ {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+ {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+ {"Mali", "Mali (le)", "ML", "MLI", "466"},
+ {"Malta", "Malte", "MT", "MLT", "470"},
+ {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+ {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+ {"Mauritius", "Maurice", "MU", "MUS", "480"},
+ {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+ {"Monaco", "Monaco", "MC", "MCO", "492"},
+ {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+ {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+ {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+ {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+ {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+ {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+ {"Oman", "Oman", "OM", "OMN", "512"},
+ {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+ {"Nauru", "Nauru", "NR", "NRU", "520"},
+ {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+ {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+ {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+ {"Aruba", "Aruba", "AW", "ABW", "533"},
+ {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+ {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+ {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+ {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+ {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+ {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+ {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+ {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+ {"Niue", "Niue", "NU", "NIU", "570"},
+ {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
+ {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+ {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
+ {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+ {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+ {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
+ {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+ {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+ {"Panama", "Panama (le)", "PA", "PAN", "591"},
+ {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+ {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+ {"Peru", "Pérou (le)", "PE", "PER", "604"},
+ {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+ {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+ {"Poland", "Pologne (la)", "PL", "POL", "616"},
+ {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+ {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+ {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+ {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+ {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+ {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+ {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+ {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+ {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+ {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+ {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+ {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+ {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+ {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+ {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+ {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+ {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+ {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+ {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+ {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+ {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+ {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+ {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+ {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+ {"Singapore", "Singapour", "SG", "SGP", "702"},
+ {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+ {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+ {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+ {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+ {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+ {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+ {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+ {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+ {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+ {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+ {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
+ {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+ {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+ {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+ {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+ {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+ {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+ {"Togo", "Togo (le)", "TG", "TGO", "768"},
+ {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+ {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+ {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+ {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+ {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+ {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+ {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+ {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+ {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+ {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+ {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+ {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+ {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+ {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+ {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+ {"Jersey", "Jersey", "JE", "JEY", "832"},
+ {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
+ {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+ {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+ {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+ {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+ {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+ {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+ {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+ {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+ {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+ {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+ {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
+
+// ISO4217List is the list of ISO currency codes
+var ISO4217List = []string{
+ "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
+ "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
+ "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
+ "DJF", "DKK", "DOP", "DZD",
+ "EGP", "ERN", "ETB", "EUR",
+ "FJD", "FKP",
+ "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
+ "HKD", "HNL", "HRK", "HTG", "HUF",
+ "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
+ "JMD", "JOD", "JPY",
+ "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
+ "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
+ "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
+ "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
+ "OMR",
+ "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
+ "QAR",
+ "RON", "RSD", "RUB", "RWF",
+ "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
+ "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
+ "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
+ "VEF", "VES", "VND", "VUV",
+ "WST",
+ "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
+ "YER",
+ "ZAR", "ZMW", "ZWL",
+}
+
+// ISO693Entry stores ISO language codes
+type ISO693Entry struct {
+ Alpha3bCode string
+ Alpha2Code string
+ English string
+}
+
+// ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
+var ISO693List = []ISO693Entry{
+ {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
+ {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
+ {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
+ {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
+ {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
+ {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
+ {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
+ {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
+ {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
+ {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
+ {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
+ {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
+ {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
+ {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
+ {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
+ {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
+ {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
+ {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
+ {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
+ {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
+ {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
+ {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
+ {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
+ {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
+ {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
+ {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
+ {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
+ {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
+ {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
+ {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
+ {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
+ {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
+ {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
+ {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
+ {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
+ {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
+ {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
+ {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
+ {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
+ {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
+ {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
+ {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
+ {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
+ {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
+ {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
+ {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
+ {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
+ {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
+ {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
+ {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
+ {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
+ {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
+ {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
+ {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
+ {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
+ {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
+ {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
+ {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
+ {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
+ {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
+ {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
+ {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
+ {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
+ {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
+ {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
+ {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
+ {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
+ {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
+ {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
+ {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
+ {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
+ {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
+ {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
+ {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
+ {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
+ {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
+ {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
+ {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
+ {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
+ {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
+ {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
+ {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
+ {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
+ {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
+ {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
+ {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
+ {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
+ {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
+ {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
+ {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
+ {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
+ {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
+ {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
+ {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
+ {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
+ {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
+ {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
+ {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
+ {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
+ {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
+ {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
+ {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
+ {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
+ {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
+ {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
+ {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
+ {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
+ {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
+ {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
+ {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
+ {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
+ {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
+ {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
+ {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
+ {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
+ {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
+ {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
+ {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
+ {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
+ {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
+ {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
+ {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
+ {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
+ {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
+ {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
+ {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
+ {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
+ {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
+ {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
+ {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
+ {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
+ {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
+ {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
+ {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
+ {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
+ {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
+ {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
+ {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
+ {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
+ {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
+ {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
+ {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
+ {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
+ {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
+ {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
+ {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
+ {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
+ {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
+ {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
+ {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
+ {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
+ {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
+ {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
+ {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
+ {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
+ {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
+ {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
+ {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
+ {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
+ {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
+ {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
+ {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
+ {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
+ {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
+ {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
+ {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
+ {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
+ {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
+ {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
+ {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
+ {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
+ {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
+ {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
+ {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
+ {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
+ {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
+ {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
+ {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
+ {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
+ {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
+ {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
+ {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
+ {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
+ {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
+}
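The three tables above are plain slices, and the validators later in this diff consume them with simple linear scans. As a minimal sketch of that pattern, assuming nothing beyond the exported ISO4217List (the hasCurrency name is illustrative, not part of the package):

// hasCurrency reports whether code appears in ISO4217List (illustrative helper only).
func hasCurrency(code string) bool {
	for _, c := range ISO4217List {
		if c == code {
			return true
		}
	}
	return false
}

// hasCurrency("USD") == true, hasCurrency("XYZ") == false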
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 000000000000..f4c30f824a22
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,270 @@
+package govalidator
+
+import (
+ "errors"
+ "fmt"
+ "html"
+ "math"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+ return strings.Contains(str, substring)
+}
+
+// Matches checks if the string matches the pattern (pattern is a regular expression).
+// In case of error it returns false.
+func Matches(str, pattern string) bool {
+ match, _ := regexp.MatchString(pattern, str)
+ return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If second argument is empty, it will remove leading spaces.
+func LeftTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimLeftFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("^[" + chars + "]+")
+ return r.ReplaceAllString(str, "")
+}
+
+// RightTrim trims characters from the right side of the input.
+// If second argument is empty, it will remove trailing spaces.
+func RightTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimRightFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("[" + chars + "]+$")
+ return r.ReplaceAllString(str, "")
+}
+
+// Trim trims characters from both sides of the input.
+// If second argument is empty, it will remove spaces.
+func Trim(str, chars string) string {
+ return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+ pattern := "[^" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+ pattern := "[" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value below 32, as well as 127 (mostly control characters).
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+ chars := ""
+ if keepNewLines {
+ chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+ } else {
+ chars = "\x00-\x1F\x7F"
+ }
+ return BlackList(str, chars)
+}
+
+// ReplacePattern replaces every match of the regular expression pattern in str with replace.
+func ReplacePattern(str, pattern, replace string) string {
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+ if len(segment) == 0 {
+ return inrune
+ }
+ if len(inrune) != 0 {
+ inrune = append(inrune, '_')
+ }
+ inrune = append(inrune, segment...)
+ return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+ return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+ var output []rune
+ var segment []rune
+ for _, r := range str {
+
+ // do not treat numbers as a separate segment
+ if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+ output = addSegment(output, segment)
+ segment = nil
+ }
+ segment = append(segment, unicode.ToLower(r))
+ }
+ output = addSegment(output, segment)
+ return string(output)
+}
+
+// Reverse returns reversed string
+func Reverse(s string) string {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines.
+func GetLines(s string) []string {
+ return strings.Split(s, "\n")
+}
+
+// GetLine returns specified line of multiline string
+func GetLine(s string, index int) (string, error) {
+ lines := GetLines(s)
+ if index < 0 || index >= len(lines) {
+ return "", errors.New("line index out of bounds")
+ }
+ return lines[index], nil
+}
+
+// RemoveTags removes all tags from HTML string
+func RemoveTags(s string) string {
+ return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns safe string that can be used in file names
+func SafeFileName(str string) string {
+ name := strings.ToLower(str)
+ name = path.Clean(path.Base(name))
+ name = strings.Trim(name, " ")
+ separators, err := regexp.Compile(`[ &_=+:]`)
+ if err == nil {
+ name = separators.ReplaceAllString(name, "-")
+ }
+ legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+ if err == nil {
+ name = legal.ReplaceAllString(name, "")
+ }
+ for strings.Contains(name, "--") {
+ name = strings.Replace(name, "--", "-", -1)
+ }
+ return name
+}
+
+// NormalizeEmail canonicalizes an email address.
+// The hostname is always lowercased, and so is the local part of the address
+// (hosts such as GMail treat the local part case-insensitively).
+// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
+// normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+ if !IsEmail(str) {
+ return "", fmt.Errorf("%s is not an email", str)
+ }
+ parts := strings.Split(str, "@")
+ parts[0] = strings.ToLower(parts[0])
+ parts[1] = strings.ToLower(parts[1])
+ if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+ parts[1] = "gmail.com"
+ parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+ }
+ return strings.Join(parts, "@"), nil
+}
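A quick sketch of the GMail rules described above, reusing the doc comment's example address with the googlemail.com alias (illustrative values only):

normalized, err := NormalizeEmail("some.one+tag@googlemail.com")
// normalized == "someone@gmail.com", err == nil: dots and the "+tag" suffix are
// stripped from the local part, and googlemail.com collapses to gmail.com.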
+
+// Truncate truncates a string to approximately the given length without breaking words, appending ending when it truncates.
+func Truncate(str string, length int, ending string) string {
+ var aftstr, befstr string
+ if len(str) > length {
+ words := strings.Fields(str)
+ before, present := 0, 0
+ for i := range words {
+ befstr = aftstr
+ before = present
+ aftstr = aftstr + words[i] + " "
+ present = len(aftstr)
+ if present > length && i != 0 {
+ if (length - before) < (present - length) {
+ return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ }
+ }
+
+ return str
+}
+
+// PadLeft pads the left side of a string if its length is less than the indicated pad length.
+func PadLeft(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of a string if its length is less than the indicated pad length.
+func PadRight(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of a string if its length is less than the indicated pad length.
+func PadBoth(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads a string on the left, the right, or both sides.
+// Note that the padding string can be unicode and more than one character.
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+ // When the padded length is less than the current string size
+ if padLen < utf8.RuneCountInString(str) {
+ return str
+ }
+
+ padLen -= utf8.RuneCountInString(str)
+
+ targetLen := padLen
+
+ targetLenLeft := targetLen
+ targetLenRight := targetLen
+ if padLeft && padRight {
+ targetLenLeft = padLen / 2
+ targetLenRight = padLen - targetLenLeft
+ }
+
+ strToRepeatLen := utf8.RuneCountInString(padStr)
+
+ repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+ repeatedString := strings.Repeat(padStr, repeatTimes)
+
+ leftSide := ""
+ if padLeft {
+ leftSide = repeatedString[0:targetLenLeft]
+ }
+
+ rightSide := ""
+ if padRight {
+ rightSide = repeatedString[0:targetLenRight]
+ }
+
+ return leftSide + str + rightSide
+}
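A few concrete pad calls evaluated against the logic above; the pad strings are ASCII, so rune and byte counts coincide (illustrative expected values):

// PadLeft("123", "0", 6)    == "000123"
// PadRight("go", ".", 5)    == "go..."
// PadBoth("ab", "-", 6)     == "--ab--"
// PadBoth("résumé", "-", 4) == "résumé" (already longer than padLen, returned unchanged)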
+
+// TruncatingErrorf drops any args that have no matching %s verb in str before calling fmt.Errorf.
+func TruncatingErrorf(str string, args ...interface{}) error {
+ n := strings.Count(str, "%s")
+ return fmt.Errorf(str, args[:n]...)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 000000000000..c9c4fac0655a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1768 @@
+// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ fieldsRequiredByDefault bool
+ nilPtrAllowedByRequired = false
+ notNumberRegexp = regexp.MustCompile("[^0-9]+")
+ whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
+ paramsRegexp = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+const rfc3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+// type exampleStruct struct {
+// Name string ``
+// Email string `valid:"email"`
+// }
+// This, however, will only fail when Email is empty or an invalid email address:
+// type exampleStruct2 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email"`
+// }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+// type exampleStruct3 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email,optional"`
+// }
+func SetFieldsRequiredByDefault(value bool) {
+ fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+// type exampleStruct struct {
+// Name *string `valid:"required"`
+// }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+ nilPtrAllowedByRequired = value
+}
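Tying the tag examples above together, a minimal sketch of tag-driven validation; the post struct is hypothetical, and ValidateStruct is assumed to be the package's usual entrypoint defined further down in this file:

// post is a hypothetical struct used only for illustration.
type post struct {
	Title string `valid:"required"`
	Email string `valid:"email,optional"`
}

func examplePostValidation() (bool, error) {
	// Title satisfies "required"; Email may stay empty because it is "optional".
	return ValidateStruct(post{Title: "hello"})
}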
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+ // TODO uppercase letters are not supported
+ return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email address with an existing domain.
+func IsExistingEmail(email string) bool {
+
+ if len(email) < 6 || len(email) > 254 {
+ return false
+ }
+ at := strings.LastIndex(email, "@")
+ if at <= 0 || at > len(email)-3 {
+ return false
+ }
+ user := email[:at]
+ host := email[at+1:]
+ if len(user) > 64 {
+ return false
+ }
+ switch host {
+ case "localhost", "example.com":
+ return true
+ }
+ if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+ return false
+ }
+ if _, err := net.LookupMX(host); err != nil {
+ if _, err := net.LookupIP(host); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsURL checks if the string is a URL.
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ strTemp := str
+ if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+ // support no indicated urlscheme but with colon for port number
+ // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+ strTemp = "http://" + str
+ }
+ u, err := url.Parse(strTemp)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+ url, err := url.ParseRequestURI(rawurl)
+ if err != nil {
+ return false //Couldn't even parse the rawurl
+ }
+ if len(url.Scheme) == 0 {
+ return false //No Scheme found
+ }
+ return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+ _, err := url.ParseRequestURI(rawurl)
+ return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+
+ for _, c := range str {
+ if !unicode.IsLetter(c) {
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ for _, c := range str {
+ if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsNumber(c) { //numbers && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsDigit(c) { //digits && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+ return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+ return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+ return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
+func HasLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least 1 uppercase. Empty string is valid.
+func HasUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+ return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+ f, _ := ToFloat(str)
+ p := int64(f)
+ q, _ := ToInt(num)
+ if q == 0 {
+ return false
+ }
+ return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null.
+func IsNull(str string) bool {
+ return len(str) == 0
+}
+
+// IsNotNull checks if the string is not null.
+func IsNotNull(str string) bool {
+ return !IsNull(str)
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace
+func HasWhitespaceOnly(str string) bool {
+ return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace
+func HasWhitespace(str string) bool {
+ return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+ return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+ return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+ return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+ return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+ return rxUUID.MatchString(str)
+}
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var ulidDec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// ulidEncodedSize is the length of a text-encoded ULID.
+const ulidEncodedSize = 26
+
+// IsULID checks if the string is a ULID.
+//
+// Implementation adapted from:
+// https://github.com/oklog/ulid (Apache-2.0 License)
+//
+func IsULID(str string) bool {
+ // Check if a base32 encoded ULID is the right length.
+ if len(str) != ulidEncodedSize {
+ return false
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if ulidDec[str[0]] == 0xFF ||
+ ulidDec[str[1]] == 0xFF ||
+ ulidDec[str[2]] == 0xFF ||
+ ulidDec[str[3]] == 0xFF ||
+ ulidDec[str[4]] == 0xFF ||
+ ulidDec[str[5]] == 0xFF ||
+ ulidDec[str[6]] == 0xFF ||
+ ulidDec[str[7]] == 0xFF ||
+ ulidDec[str[8]] == 0xFF ||
+ ulidDec[str[9]] == 0xFF ||
+ ulidDec[str[10]] == 0xFF ||
+ ulidDec[str[11]] == 0xFF ||
+ ulidDec[str[12]] == 0xFF ||
+ ulidDec[str[13]] == 0xFF ||
+ ulidDec[str[14]] == 0xFF ||
+ ulidDec[str[15]] == 0xFF ||
+ ulidDec[str[16]] == 0xFF ||
+ ulidDec[str[17]] == 0xFF ||
+ ulidDec[str[18]] == 0xFF ||
+ ulidDec[str[19]] == 0xFF ||
+ ulidDec[str[20]] == 0xFF ||
+ ulidDec[str[21]] == 0xFF ||
+ ulidDec[str[22]] == 0xFF ||
+ ulidDec[str[23]] == 0xFF ||
+ ulidDec[str[24]] == 0xFF ||
+ ulidDec[str[25]] == 0xFF {
+ return false
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if str[0] > '7' {
+ return false
+ }
+ return true
+}
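For reference, the canonical example ULID from the oklog/ulid documentation passes the checks above (illustrative expected values):

// IsULID("01ARZ3NDEKTSV4RRFFQ69G5FAV") == true   (26 Crockford base32 characters)
// IsULID("01ARZ3NDEKTSV4RRFFQ69G5FA")  == false  (only 25 characters)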
+
+// IsCreditCard checks if the string is a credit card.
+func IsCreditCard(str string) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ if !rxCreditCard.MatchString(sanitized) {
+ return false
+ }
+
+ number, _ := ToInt(sanitized)
+ number, lastDigit := number / 10, number % 10
+
+ var sum int64
+ for i:=0; number > 0; i++ {
+ digit := number % 10
+
+ if i % 2 == 0 {
+ digit *= 2
+ if digit > 9 {
+ digit -= 9
+ }
+ }
+
+ sum += digit
+ number = number / 10
+ }
+
+ return (sum + lastDigit) % 10 == 0
+}
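As a sanity check on the Luhn loop above, using the conventional Visa test number (illustrative expected values):

// IsCreditCard("4111 1111 1111 1111") == true   (whitespace is stripped before matching)
// IsCreditCard("4111111111111112")    == false  (last digit breaks the checksum)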
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+ return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+ return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, it checks both variants.
+func IsISBN(str string, version int) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ var checksum int32
+ var i int32
+ if version == 10 {
+ if !rxISBN10.MatchString(sanitized) {
+ return false
+ }
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(sanitized[i]-'0')
+ }
+ if sanitized[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(sanitized[9]-'0')
+ }
+ if checksum%11 == 0 {
+ return true
+ }
+ return false
+ } else if version == 13 {
+ if !rxISBN13.MatchString(sanitized) {
+ return false
+ }
+ factor := []int32{1, 3}
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(sanitized[i]-'0')
+ }
+ return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+ }
+ return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+ var js json.RawMessage
+ return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+ return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Win or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+ if rxWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false, Win
+ }
+ return true, Win
+ } else if rxUnixPath.MatchString(str) {
+ return true, Unix
+ }
+ return false, Unknown
+}
+
+// IsWinFilePath checks both relative and absolute Windows file paths.
+func IsWinFilePath(str string) bool {
+ if rxARWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false
+ }
+ return true
+ }
+ return false
+}
+
+// IsUnixFilePath checks both relative and absolute Unix file paths.
+func IsUnixFilePath(str string) bool {
+ if rxARUnixPath.MatchString(str) {
+ return true
+ }
+ return false
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
+func IsDataURI(str string) bool {
+ dataURI := strings.Split(str, ",")
+ if !rxDataURI.MatchString(dataURI[0]) {
+ return false
+ }
+ return IsBase64(dataURI[1])
+}
+
+// IsMagnetURI checks if a string is a valid magnet URI.
+func IsMagnetURI(str string) bool {
+ return rxMagnetURI.MatchString(str)
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
+func IsISO3166Alpha2(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
+func IsISO3166Alpha3(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha3Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha2 checks if a string is a valid two-letter language code.
+func IsISO693Alpha2(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha3b checks if a string is a valid three-letter language code.
+func IsISO693Alpha3b(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha3bCode {
+ return true
+ }
+ }
+ return false
+}
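A few lookups against the country and language tables defined earlier in this diff (illustrative expected values):

// IsISO3166Alpha2("US")  == true
// IsISO3166Alpha3("FRA") == true
// IsISO693Alpha2("en")   == true
// IsISO693Alpha2("xx")   == false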
+
+// IsDNSName will validate the given string as a DNS name
+func IsDNSName(str string) bool {
+ if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+ // constraints already violated
+ return false
+ }
+ return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of type algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+ var len string
+ algo := strings.ToLower(algorithm)
+
+ if algo == "crc32" || algo == "crc32b" {
+ len = "8"
+ } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
+ len = "32"
+ } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
+ len = "40"
+ } else if algo == "tiger192" {
+ len = "48"
+ } else if algo == "sha3-224" {
+ len = "56"
+ } else if algo == "sha256" || algo == "sha3-256" {
+ len = "64"
+ } else if algo == "sha384" || algo == "sha3-384" {
+ len = "96"
+ } else if algo == "sha512" || algo == "sha3-512" {
+ len = "128"
+ } else {
+ return false
+ }
+
+ return Matches(str, "^[a-f0-9]{"+len+"}$")
+}
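A quick sketch of IsHash with the well-known MD5 digest of the empty string, given as lowercase hex (illustrative expected values):

// IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5")    == true
// IsHash("d41d8cd98f00b204e9800998ecf8427e", "sha256") == false (sha256 expects 64 hex characters)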
+
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
+func IsSHA3224(str string) bool {
+ return IsHash(str, "sha3-224")
+}
+
+// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
+func IsSHA3256(str string) bool {
+ return IsHash(str, "sha3-256")
+}
+
+// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
+func IsSHA3384(str string) bool {
+ return IsHash(str, "sha3-384")
+}
+
+// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
+func IsSHA3512(str string) bool {
+ return IsHash(str, "sha3-512")
+}
+
+// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
+func IsSHA512(str string) bool {
+ return IsHash(str, "sha512")
+}
+
+// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`
+func IsSHA384(str string) bool {
+ return IsHash(str, "sha384")
+}
+
+// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`
+func IsSHA256(str string) bool {
+ return IsHash(str, "sha256")
+}
+
+// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`
+func IsTiger192(str string) bool {
+ return IsHash(str, "tiger192")
+}
+
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
+func IsTiger160(str string) bool {
+ return IsHash(str, "tiger160")
+}
+
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
+func IsRipeMD160(str string) bool {
+ return IsHash(str, "ripemd160")
+}
+
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
+func IsSHA1(str string) bool {
+ return IsHash(str, "sha1")
+}
+
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
+func IsTiger128(str string) bool {
+ return IsHash(str, "tiger128")
+}
+
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
+func IsRipeMD128(str string) bool {
+ return IsHash(str, "ripemd128")
+}
+
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
+func IsCRC32(str string) bool {
+ return IsHash(str, "crc32")
+}
+
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
+func IsCRC32b(str string) bool {
+ return IsHash(str, "crc32b")
+}
+
+// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`
+func IsMD5(str string) bool {
+ return IsHash(str, "md5")
+}
+
+// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`
+func IsMD4(str string) bool {
+ return IsHash(str, "md4")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+ if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+ return true
+ }
+
+ return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
+func IsIP(str string) bool {
+ return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+ if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+ return true
+ }
+ return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 and IPv6).
+func IsCIDR(str string) bool {
+ _, _, err := net.ParseCIDR(str)
+ return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+ _, err := net.ParseMAC(str)
+ return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+ return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+ return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+ return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+ return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is a valid IMEI
+func IsIMEI(str string) bool {
+ return rxIMEI.MatchString(str)
+}
+
+// IsIMSI checks if a string is a valid IMSI
+func IsIMSI(str string) bool {
+ if !rxIMSI.MatchString(str) {
+ return false
+ }
+
+ mcc, err := strconv.ParseInt(str[0:3], 10, 32)
+ if err != nil {
+ return false
+ }
+
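+ // check the Mobile Country Code (the first three digits) against the list of known MCCs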
+ switch mcc {
+ case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
+ case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
+ case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
+ case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
+ case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
+ case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
+ case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
+ case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
+ case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
+ case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
+ case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
+ case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
+ case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
+ case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
+ case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
+ case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
+ case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
+ case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
+ case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
+ case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
+ case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
+ case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
+ case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
+ case 738, 740, 742, 744, 746, 748, 750, 995:
+ return true
+ default:
+ return false
+ }
+ return true
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided bit length
+func IsRsaPublicKey(str string, keylen int) bool {
+ bb := bytes.NewBufferString(str)
+ pemBytes, err := ioutil.ReadAll(bb)
+ if err != nil {
+ return false
+ }
+ block, _ := pem.Decode(pemBytes)
+ if block != nil && block.Type != "PUBLIC KEY" {
+ return false
+ }
+ var der []byte
+
+ if block != nil {
+ der = block.Bytes
+ } else {
+ der, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return false
+ }
+ }
+
+ key, err := x509.ParsePKIXPublicKey(der)
+ if err != nil {
+ return false
+ }
+ pubkey, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return false
+ }
+ bitlen := len(pubkey.N.Bytes()) * 8
+ return bitlen == int(keylen)
+}
+
+// IsRegex checks if a given string is a valid regex with RE2 syntax
+func IsRegex(str string) bool {
+ if _, err := regexp.Compile(str); err == nil {
+ return true
+ }
+ return false
+}
+
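+// toJSONName returns the field name from a json struct tag, or "" when the field is skipped with "-".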
+func toJSONName(tag string) string {
+ if tag == "" {
+ return ""
+ }
+
+ // JSON name always comes first. If there are no options then split[0] is
+ // the JSON name; if the JSON name is not set, then split[0] is an empty string.
+ split := strings.SplitN(tag, ",", 2)
+
+ name := split[0]
+
+ // However it is possible that the field is skipped when
+ // (de-)serializing from/to JSON, in which case assume that there is no
+ // tag name to use
+ if name == "-" {
+ return ""
+ }
+ return name
+}
+
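+// prependPathToErrors prefixes path onto the Path of every Error contained in err.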
+func prependPathToErrors(err error, path string) error {
+ switch err2 := err.(type) {
+ case Error:
+ err2.Path = append([]string{path}, err2.Path...)
+ return err2
+ case Errors:
+ errors := err2.Errors()
+ for i, err3 := range errors {
+ errors[i] = prependPathToErrors(err3, path)
+ }
+ return err2
+ }
+ return err
+}
+
+// ValidateArray performs validation according to the condition iterator that validates every element of the array
+func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
+ return Every(array, iterator)
+}
+
+// ValidateMap uses a validation map for fields.
+// result will be equal to `false` if there are any errors.
+// s is the map containing the data to be validated.
+// m is the validation map in the form:
+// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
+func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ var errs Errors
+ var index int
+ val := reflect.ValueOf(s)
+ for key, value := range s {
+ presentResult := true
+ validator, ok := m[key]
+ if !ok {
+ presentResult = false
+ var err error
+ err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key)
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ valueField := reflect.ValueOf(value)
+ mapResult := true
+ typeResult := true
+ structResult := true
+ resultField := true
+ switch subValidator := validator.(type) {
+ case map[string]interface{}:
+ var err error
+ if v, ok := value.(map[string]interface{}); !ok {
+ mapResult = false
+ err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ } else {
+ mapResult, err = ValidateMap(v, subValidator)
+ if err != nil {
+ mapResult = false
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ case string:
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ subValidator != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err = typeCheck(valueField, reflect.StructField{
+ Name: key,
+ PkgPath: "",
+ Type: val.Type(),
+ Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
+ Offset: 0,
+ Index: []int{index},
+ Anonymous: false,
+ }, val, nil)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case nil:
+ // already handled by the check above
+ default:
+ typeResult = false
+ err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ result = result && presentResult && typeResult && resultField && structResult && mapResult
+ index++
+ }
+ // checks required keys
+ requiredResult := true
+ for key, value := range m {
+ if schema, ok := value.(string); ok {
+ tags := parseTagIntoMap(schema)
+ if required, ok := tags["required"]; ok {
+ if _, ok := s[key]; !ok {
+ requiredResult = false
+ if required.customErrorMessage != "" {
+ err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
+ } else {
+ err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
+ }
+ errs = append(errs, err)
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result && requiredResult, err
+}
+
+// ValidateStruct uses tags for fields.
+// result will be equal to `false` if there are any errors.
+// todo currently there is no guarantee that errors will be returned in a predictable order (tests may fail)
+func ValidateStruct(s interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ val := reflect.ValueOf(s)
+ if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ // we only accept structs
+ if val.Kind() != reflect.Struct {
+ return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+ }
+ var errs Errors
+ for i := 0; i < val.NumField(); i++ {
+ valueField := val.Field(i)
+ typeField := val.Type().Field(i)
+ if typeField.PkgPath != "" {
+ continue // Private field
+ }
+ structResult := true
+ if valueField.Kind() == reflect.Interface {
+ valueField = valueField.Elem()
+ }
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ typeField.Tag.Get(tagName) != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, typeField.Name)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err2 := typeCheck(valueField, typeField, val, nil)
+ if err2 != nil {
+
+ // Replace structure name with JSON name if there is a tag on the variable
+ jsonTag := toJSONName(typeField.Tag.Get("json"))
+ if jsonTag != "" {
+ switch jsonError := err2.(type) {
+ case Error:
+ jsonError.Name = jsonTag
+ err2 = jsonError
+ case Errors:
+ for i2, err3 := range jsonError {
+ switch customErr := err3.(type) {
+ case Error:
+ customErr.Name = jsonTag
+ jsonError[i2] = customErr
+ }
+ }
+
+ err2 = jsonError
+ }
+ }
+
+ errs = append(errs, err2)
+ }
+ result = result && resultField && structResult
+ }
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result, err
+}
+
+// ValidateStructAsync performs async validation of the struct and returns results through the channels
+func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
+ res := make(chan bool)
+ errors := make(chan error)
+
+ go func() {
+ defer close(res)
+ defer close(errors)
+
+ isValid, isFailed := ValidateStruct(s)
+
+ res <- isValid
+ errors <- isFailed
+ }()
+
+ return res, errors
+}
+
+// ValidateMapAsync performs async validation of the map and returns results through the channels
+func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
+ res := make(chan bool)
+ errors := make(chan error)
+
+ go func() {
+ defer close(res)
+ defer close(errors)
+
+ isValid, isFailed := ValidateMap(s, m)
+
+ res <- isValid
+ errors <- isFailed
+ }()
+
+ return res, errors
+}
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+ optionsMap := make(tagOptionsMap)
+ options := strings.Split(tag, ",")
+
+ for i, option := range options {
+ option = strings.TrimSpace(option)
+
+ validationOptions := strings.Split(option, "~")
+ if !isValidTag(validationOptions[0]) {
+ continue
+ }
+ if len(validationOptions) == 2 {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+ } else {
+ optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+ }
+ }
+ return optionsMap
+}
+
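+// isValidTag reports whether s is usable as a validator tag name.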
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// IsSSN will validate the given string as a U.S. Social Security Number
+func IsSSN(str string) bool {
+ if str == "" || len(str) != 11 {
+ return false
+ }
+ return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if a string is a valid semantic version
+func IsSemver(str string) bool {
+ return rxSemver.MatchString(str)
+}
+
+// IsType checks if an interface value is of the given type
+func IsType(v interface{}, params ...string) bool {
+ if len(params) == 1 {
+ typ := params[0]
+ return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1)
+ }
+ return false
+}
+
+// IsTime checks if a string is a valid time according to the given format
+func IsTime(str string, format string) bool {
+ _, err := time.Parse(format, str)
+ return err == nil
+}
+
+// IsUnixTime checks if a string is a valid Unix timestamp value
+func IsUnixTime(str string) bool {
+ if _, err := strconv.Atoi(str); err == nil {
+ return true
+ }
+ return false
+}
+
+// IsRFC3339 checks if a string is a valid timestamp according to RFC3339
+func IsRFC3339(str string) bool {
+ return IsTime(str, time.RFC3339)
+}
+
+// IsRFC3339WithoutZone checks if a string is a valid RFC3339 timestamp without the timezone.
+func IsRFC3339WithoutZone(str string) bool {
+ return IsTime(str, rfc3339WithoutZone)
+}
+
+// IsISO4217 checks if a string is a valid ISO 4217 currency code
+func IsISO4217(str string) bool {
+ for _, currency := range ISO4217List {
+ if str == currency {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ByteLength checks a string's length in bytes
+func ByteLength(str string, params ...string) bool {
+ if len(params) == 2 {
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return len(str) >= int(min) && len(str) <= int(max)
+ }
+
+ return false
+}
+
+// RuneLength checks a string's length in runes
+// Alias for StringLength
+func RuneLength(str string, params ...string) bool {
+ return StringLength(str, params...)
+}
+
+// IsRsaPub checks whether a string is a valid RSA public key of the given length
+// Alias for IsRsaPublicKey
+func IsRsaPub(str string, params ...string) bool {
+ if len(params) == 1 {
+ len, _ := ToInt(params[0])
+ return IsRsaPublicKey(str, int(len))
+ }
+
+ return false
+}
+
+// StringMatches checks if a string matches a given pattern.
+func StringMatches(s string, params ...string) bool {
+ if len(params) == 1 {
+ pattern := params[0]
+ return Matches(s, pattern)
+ }
+ return false
+}
+
+// StringLength checks a string's length (including multi-byte strings)
+func StringLength(str string, params ...string) bool {
+
+ if len(params) == 2 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return strLength >= int(min) && strLength <= int(max)
+ }
+
+ return false
+}
+
+// MinStringLength checks a string's minimum length (including multi-byte strings)
+func MinStringLength(str string, params ...string) bool {
+
+ if len(params) == 1 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ return strLength >= int(min)
+ }
+
+ return false
+}
+
+// MaxStringLength checks a string's maximum length (including multi-byte strings)
+func MaxStringLength(str string, params ...string) bool {
+
+ if len(params) == 1 {
+ strLength := utf8.RuneCountInString(str)
+ max, _ := ToInt(params[0])
+ return strLength <= int(max)
+ }
+
+ return false
+}
+
+// Range checks whether a string's numeric value is within the given range
+func Range(str string, params ...string) bool {
+ if len(params) == 2 {
+ value, _ := ToFloat(str)
+ min, _ := ToFloat(params[0])
+ max, _ := ToFloat(params[1])
+ return InRange(value, min, max)
+ }
+
+ return false
+}
+
+// IsInRaw checks if a string is in a pipe-separated list of allowed values
+func IsInRaw(str string, params ...string) bool {
+ if len(params) == 1 {
+ rawParams := params[0]
+
+ parsedParams := strings.Split(rawParams, "|")
+
+ return IsIn(str, parsedParams...)
+ }
+
+ return false
+}
+
+// IsIn checks if string str is a member of the set of strings params
+func IsIn(str string, params ...string) bool {
+ for _, param := range params {
+ if str == param {
+ return true
+ }
+ }
+
+ return false
+}
+
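+// checkRequired reports whether an empty value is acceptable for the "required"/"optional" tag options, returning an Error when a required value is missing.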
+func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
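+ // a nil pointer or interface is allowed to satisfy "required" when nilPtrAllowedByRequired is set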
+ if nilPtrAllowedByRequired {
+ k := v.Kind()
+ if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
+ return true, nil
+ }
+ }
+
+ if requiredOption, isRequired := options["required"]; isRequired {
+ if len(requiredOption.customErrorMessage) > 0 {
+ return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
+ } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
+ return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
+ }
+ // not required and empty is valid
+ return true, nil
+}
+
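+// typeCheck validates a single value against the validators parsed from its struct tag, recursing into maps, slices, arrays, pointers and interfaces.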
+func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
+ if !v.IsValid() {
+ return false, nil
+ }
+
+ tag := t.Tag.Get(tagName)
+
+ // checks if the field should be ignored
+ switch tag {
+ case "":
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
+ if !fieldsRequiredByDefault {
+ return true, nil
+ }
+ return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
+ }
+ case "-":
+ return true, nil
+ }
+
+ isRootType := false
+ if options == nil {
+ isRootType = true
+ options = parseTagIntoMap(tag)
+ }
+
+ if isEmptyValue(v) {
+ // an empty value is not validated; only the required option is checked
+ isValid, resultErr = checkRequired(v, t, options)
+ for key := range options {
+ delete(options, key)
+ }
+ return isValid, resultErr
+ }
+
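+ // run any custom type validators registered in CustomTypeTagMap before the built-in ones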
+ var customTypeErrors Errors
+ optionsOrder := options.orderedKeys()
+ for _, validatorName := range optionsOrder {
+ validatorStruct := options[validatorName]
+ if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
+ delete(options, validatorName)
+
+ if result := validatefunc(v.Interface(), o.Interface()); !result {
+ if len(validatorStruct.customErrorMessage) > 0 {
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
+ continue
+ }
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
+ }
+ }
+ }
+
+ if len(customTypeErrors.Errors()) > 0 {
+ return false, customTypeErrors
+ }
+
+ if isRootType {
+ // Ensure that the value has been checked by all specified validators before reporting that it is valid
+ defer func() {
+ delete(options, "optional")
+ delete(options, "required")
+
+ if isValid && resultErr == nil && len(options) != 0 {
+ optionsOrder := options.orderedKeys()
+ for _, validator := range optionsOrder {
+ isValid = false
+ resultErr = Error{t.Name, fmt.Errorf(
+ "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
+ return
+ }
+ }
+ }()
+ }
+
+ for _, validatorSpec := range optionsOrder {
+ validatorStruct := options[validatorSpec]
+ var negate bool
+ validator := validatorSpec
+ customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+ // checks whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = validator[1:]
+ negate = true
+ }
+
+ // checks for interface param validators
+ for key, value := range InterfaceParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) == 0 {
+ continue
+ }
+
+ validatefunc, ok := InterfaceParamTagMap[key]
+ if !ok {
+ continue
+ }
+
+ delete(options, validatorSpec)
+
+ field := fmt.Sprint(v)
+ if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ // for each tag option checks the map of validator functions
+ for _, validatorSpec := range optionsOrder {
+ validatorStruct := options[validatorSpec]
+ var negate bool
+ validator := validatorSpec
+ customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+ // checks whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = validator[1:]
+ negate = true
+ }
+
+ // checks for param validators
+ for key, value := range ParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) == 0 {
+ continue
+ }
+
+ validatefunc, ok := ParamTagMap[key]
+ if !ok {
+ continue
+ }
+
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // type not yet supported, fail
+ return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
+ }
+ }
+
+ if validatefunc, ok := TagMap[validator]; ok {
+ delete(options, validatorSpec)
+
+ switch v.Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field); !result && !negate || result && negate {
+ if customMsgExists {
+ return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ if negate {
+ return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+ }
+ default:
+ // type not yet supported, fail here
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+ return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+ }
+ }
+ }
+ return true, nil
+ case reflect.Map:
+ if v.Type().Key().Kind() != reflect.String {
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+ var sv stringValues
+ sv = v.MapKeys()
+ sort.Sort(sv)
+ result := true
+ for i, k := range sv {
+ var resultItem bool
+ var err error
+ if v.MapIndex(k).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+ if err != nil {
+ err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Slice, reflect.Array:
+ result := true
+ for i := 0; i < v.Len(); i++ {
+ var resultItem bool
+ var err error
+ if v.Index(i).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.Index(i), t, o, options)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.Index(i).Interface())
+ if err != nil {
+ err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Interface:
+ // If the value is an interface then validate its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return ValidateStruct(v.Interface())
+ case reflect.Ptr:
+ // If the value is a pointer then check its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return typeCheck(v.Elem(), t, o, options)
+ case reflect.Struct:
+ return true, nil
+ default:
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+}
+
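+// stripParams removes a trailing parameter list from a validator name, e.g. "length(2|3)" becomes "length".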
+func stripParams(validatorString string) string {
+ return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+// isEmptyValue checks whether the value is empty
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String, reflect.Array:
+ return v.Len() == 0
+ case reflect.Map, reflect.Slice:
+ return v.Len() == 0 || v.IsNil()
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for the specified field of the struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or this field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+ if e == nil {
+ return ""
+ }
+ return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns map of errors of the struct validated
+// by ValidateStruct or empty map if there are no errors.
+func ErrorsByField(e error) map[string]string {
+ m := make(map[string]string)
+ if e == nil {
+ return m
+ }
+ // prototype for ValidateStruct
+
+ switch e := e.(type) {
+ case Error:
+ m[e.Name] = e.Err.Error()
+ case Errors:
+ for _, item := range e.Errors() {
+ n := ErrorsByField(item)
+ for k, v := range n {
+ m[k] = v
+ }
+ }
+ }
+
+ return m
+}
+
+// Error returns the string equivalent for reflect.Type
+func (e *UnsupportedTypeError) Error() string {
+ return "validator: unsupported type: " + e.Type.String()
+}
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
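+// IsE164 checks if a string is a valid E.164 formatted phone number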
+func IsE164(str string) bool {
+ return rxE164.MatchString(str)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml
new file mode 100644
index 000000000000..bc5f7b0864bd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml
@@ -0,0 +1,15 @@
+box: golang
+build:
+ steps:
+ - setup-go-workspace
+
+ - script:
+ name: go get
+ code: |
+ go version
+ go get -t ./...
+
+ - script:
+ name: go test
+ code: |
+ go test -race -v ./...
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/config.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 4818ea427e3f..776e31b21d65 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -20,16 +20,16 @@ type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(&aws.Config{
-// MaxRetries: aws.Int(3),
-// }))
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"),
-// })
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
@@ -192,6 +192,23 @@ type Config struct {
//
EC2MetadataDisableTimeoutOverride *bool
+ // Set this to `false` to disable the EC2Metadata client from falling back to IMDSv1.
+ // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility.
+ // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata
+ // client will return any errors encountered from attempting to fetch a token instead of silently
+ // using the insecure data flow of IMDSv1.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataEnableFallback(false)))
+ //
+ // svc := s3.New(sess)
+ //
+ // See [configuring IMDS] for more information.
+ //
+ // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ EC2MetadataEnableFallback *bool
+
// Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
@@ -283,16 +300,16 @@ type Config struct {
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(aws.NewConfig().
-// WithMaxRetries(3),
-// ))
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, aws.NewConfig().
-// WithRegion("us-west-2"),
-// )
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
func NewConfig() *Config {
return &Config{}
}
@@ -432,6 +449,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
return c
}
+// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config {
+ c.EC2MetadataEnableFallback = &v
+ return c
+}
+
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
@@ -576,6 +600,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
+ if other.EC2MetadataEnableFallback != nil {
+ dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
+ }
+
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
index e62483600299..18694f07f7f1 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider))
return credentials.NewCredentials(p)
}
-type credentialProcessResponse struct {
- Version int
- AccessKeyID string `json:"AccessKeyId"`
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
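+//
+// A sketch of the JSON document a credential_process is expected to emit
+// (the field values below are placeholders, not real credentials):
+//
+//	{
+//	    "Version": 1,
+//	    "AccessKeyId": "an AWS access key id",
+//	    "SecretAccessKey": "a secret access key",
+//	    "SessionToken": "a session token",
+//	    "Expiration": "2019-05-29T00:21:43Z"
+//	}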
+type CredentialProcessResponse struct {
+ // As of this writing, the Version key must be set to 1. This might
+ // increment over time as the structure evolves.
+ Version int
+
+ // The access key ID that identifies the temporary security credentials.
+ AccessKeyID string `json:"AccessKeyId"`
+
+ // The secret access key that can be used to sign requests.
SecretAccessKey string
- SessionToken string
- Expiration *time.Time
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ SessionToken string
+
+ // The date on which the current credentials expire.
+ Expiration *time.Time
}
// Retrieve executes the 'credential_process' and returns the credentials.
@@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
}
// Serialize and validate response
- resp := &credentialProcessResponse{}
+ resp := &CredentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index df63bade1048..f4cc8751d04f 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -57,13 +57,13 @@ type EC2Metadata struct {
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
-//
// Example:
-// // Create a EC2Metadata client from just a session.
-// svc := ec2metadata.New(mySession)
//
-// // Create a EC2Metadata client with additional configuration
-// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 4b29f190bf94..604aeffdeb4e 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -1,6 +1,7 @@
package ec2metadata
import (
+ "fmt"
"net/http"
"sync/atomic"
"time"
@@ -33,11 +34,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
return &tokenProvider{client: c, configuredTTL: duration}
}
+// check if fallback is enabled
+func (t *tokenProvider) fallbackEnabled() bool {
+ return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
+}
+
// fetchTokenHandler fetches token for EC2Metadata service client by default.
func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
-
// short-circuits to insecure data flow if tokenProvider is disabled.
- if v := atomic.LoadUint32(&t.disabled); v == 1 {
+ if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
return
}
@@ -49,23 +54,21 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
output, err := t.client.getToken(r.Context(), t.configuredTTL)
if err != nil {
+ // only attempt fallback to insecure data flow if IMDSv1 is enabled
+ if !t.fallbackEnabled() {
+ r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
+ return
+ }
- // change the disabled flag on token provider to true,
- // when error is request timeout error.
+ // change the disabled flag on token provider to true and fallback
if requestFailureError, ok := err.(awserr.RequestFailure); ok {
switch requestFailureError.StatusCode() {
case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
atomic.StoreUint32(&t.disabled, 1)
+ t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
case http.StatusBadRequest:
r.Error = requestFailureError
}
-
- // Check if request timed out while waiting for response
- if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
- if e.Code() == request.ErrCodeRequestError {
- atomic.StoreUint32(&t.disabled, 1)
- }
- }
}
return
}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 2fefde190869..dfdda6cb89c9 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -27,6 +27,7 @@ const (
ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
+ ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
CaCentral1RegionID = "ca-central-1" // Canada (Central).
EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
EuCentral2RegionID = "eu-central-2" // Europe (Zurich).
@@ -172,6 +173,9 @@ var awsPartition = partition{
"ap-southeast-3": region{
Description: "Asia Pacific (Jakarta)",
},
+ "ap-southeast-4": region{
+ Description: "Asia Pacific (Melbourne)",
+ },
"ca-central-1": region{
Description: "Canada (Central)",
},
@@ -261,6 +265,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -432,6 +439,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -582,6 +592,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -603,12 +616,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -663,6 +682,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -843,6 +865,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -898,6 +923,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -927,6 +955,34 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "aoss": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"api.detective": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -1146,6 +1202,14 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "api.ecr.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -1456,6 +1520,26 @@ var awsPartition = partition{
},
},
},
+ "api.ecr-public": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
"api.elastic-inference": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -1691,9 +1775,15 @@ var awsPartition = partition{
},
"api.mediatailor": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -1709,6 +1799,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -1759,6 +1852,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -1774,12 +1870,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -1789,6 +1891,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2042,6 +2147,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -2233,6 +2341,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -2303,24 +2414,39 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2330,6 +2456,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2439,6 +2568,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -2515,6 +2647,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -2868,6 +3003,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2973,6 +3117,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -3125,15 +3272,30 @@ var awsPartition = partition{
},
"arc-zonal-shift": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -3143,6 +3305,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -3493,6 +3667,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -3668,6 +3845,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -3865,6 +4045,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -3955,12 +4138,47 @@ var awsPartition = partition{
},
"cases": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
},
},
"cassandra": service{
@@ -4092,6 +4310,43 @@ var awsPartition = partition{
},
},
},
+ "cleanrooms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"cloud9": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -4191,6 +4446,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -4379,6 +4637,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -4553,6 +4814,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -4657,6 +4921,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -4767,50 +5034,7 @@ var awsPartition = partition{
},
},
},
- "codeartifact": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codebuild": service{
+ "cloudtrail-data": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -4869,6 +5093,131 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codeartifact": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codebuild": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
@@ -4940,6 +5289,17 @@ var awsPartition = partition{
},
},
},
+ "codecatalyst": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "codecatalyst.global.api.aws",
+ },
+ },
+ },
"codecommit": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -4960,12 +5320,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -5011,6 +5377,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -5123,6 +5492,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -5269,6 +5641,9 @@ var awsPartition = partition{
},
"codepipeline": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -5299,6 +5674,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -5359,6 +5737,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -5579,6 +5960,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -5679,6 +6063,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6169,6 +6556,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -6318,6 +6708,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@@ -7008,6 +7401,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7029,12 +7425,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7320,6 +7722,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -7484,18 +7889,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7505,6 +7919,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -7557,6 +7974,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -7900,6 +8320,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7921,12 +8344,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8065,6 +8494,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -8219,6 +8651,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8228,6 +8663,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -8240,12 +8678,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8390,6 +8834,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -8577,6 +9024,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -8740,6 +9190,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8749,18 +9202,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8885,6 +9347,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -9190,6 +9655,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -9235,6 +9709,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -9253,6 +9736,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -9334,6 +9826,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-south-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ap-southeast-1",
}: endpoint{
@@ -9379,6 +9880,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-north-1",
}: endpoint{
@@ -9397,6 +9907,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-west-1",
}: endpoint{
@@ -9589,6 +10108,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -9737,6 +10259,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -10292,6 +10817,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -10443,6 +10971,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -10660,6 +11191,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -10669,18 +11203,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -10828,6 +11371,9 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.ap-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -10846,6 +11392,9 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -10864,6 +11413,9 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.eu-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -10876,6 +11428,9 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.eu-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11074,6 +11629,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -11456,6 +12014,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -11672,12 +12233,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -11687,6 +12254,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -11883,12 +12456,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11934,6 +12513,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -12089,20 +12671,10 @@ var awsPartition = partition{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "dataplane-ap-south-1",
- }: endpoint{
- Hostname: "greengrass-ats.iot.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "dataplane-us-east-2",
+ Region: "ca-central-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "greengrass-ats.iot.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
+ Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
@@ -12113,15 +12685,69 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "greengrass-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "greengrass-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "greengrass-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "greengrass-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "greengrass-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "greengrass-fips.us-west-2.amazonaws.com",
+ },
},
},
"groundstation": service{
@@ -12235,6 +12861,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -12250,12 +12879,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -12402,6 +13037,9 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -12506,12 +13144,18 @@ var awsPartition = partition{
},
"identitystore": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -12521,6 +13165,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -12530,12 +13177,21 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -12830,6 +13486,156 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "internetmonitor": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "internetmonitor.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "internetmonitor.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "internetmonitor.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "internetmonitor.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "internetmonitor.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "internetmonitor.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "internetmonitor.us-west-2.api.aws",
+ },
+ },
+ },
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -13574,12 +14380,42 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
+ },
},
},
"iotwireless": service{
@@ -13676,6 +14512,31 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "ivsrealtime": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -13726,6 +14587,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -13800,6 +14664,12 @@ var awsPartition = partition{
},
"kendra": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -13868,6 +14738,146 @@ var awsPartition = partition{
},
},
},
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-west-2.api.aws",
+ },
+ },
+ },
"kinesis": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -13900,6 +14910,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -14030,6 +15043,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -14045,12 +15061,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -14060,6 +15082,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -14323,6 +15348,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-southeast-4-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -14639,6 +15682,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -14696,6 +15742,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -14832,6 +15881,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "lambda.ap-southeast-3.api.aws",
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -15168,6 +16226,151 @@ var awsPartition = partition{
},
},
},
+ "license-manager-linux-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"license-manager-user-subscriptions": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -15373,6 +16576,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -15557,6 +16763,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -15569,6 +16778,10 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -15581,15 +16794,67 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
},
},
"machinelearning": service{
@@ -15898,6 +17163,9 @@ var awsPartition = partition{
},
"mediaconvert": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -16419,6 +17687,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -16469,32 +17740,7 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "mgh": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mgn": service{
+ "metrics.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -16529,12 +17775,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -16542,7 +17794,7 @@ var awsPartition = partition{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
@@ -16564,123 +17816,11 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "migrationhub-orchestrator": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "migrationhub-strategy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mobileanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "models-v2-lex": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "models.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- },
+ "mgh": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
@@ -16696,47 +17836,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "us-west-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
},
},
- "monitoring": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
+ "mgn": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -16756,9 +17861,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -16774,18 +17876,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -16798,7 +17894,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ Hostname: "mgn-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -16807,7 +17903,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ Hostname: "mgn-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -16816,7 +17912,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ Hostname: "mgn-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -16825,7 +17921,324 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ Hostname: "mgn-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "migrationhub-orchestrator": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "migrationhub-strategy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "models-v2-lex": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.{region}.{dnsSuffix}",
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -16964,6 +18377,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -17195,6 +18611,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -17267,6 +18686,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -17379,6 +18801,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -17431,6 +18856,14 @@ var awsPartition = partition{
},
"oidc": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "oidc.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{
@@ -17487,6 +18920,14 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -17575,6 +19016,14 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "oidc.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
@@ -17975,6 +19424,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18164,6 +19616,79 @@ var awsPartition = partition{
},
},
},
+ "pipes": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"polly": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -18178,6 +19703,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -18287,6 +19815,14 @@ var awsPartition = partition{
},
"portal.sso": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "portal.sso.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{
@@ -18343,6 +19879,14 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -18431,6 +19975,14 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "portal.sso.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
@@ -18651,82 +20203,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "api",
- }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18736,99 +20221,21 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ram-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ram-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ram-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ram-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ram-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-east-1.amazonaws.com",
- },
endpointKey{
Region: "us-east-2",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-west-1.amazonaws.com",
- },
endpointKey{
Region: "us-west-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-west-2.amazonaws.com",
- },
},
},
- "rbin": service{
+ "ram": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -18848,6 +20255,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -18857,6 +20267,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18864,7 +20277,167 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "rbin-fips.ca-central-1.amazonaws.com",
+ Hostname: "ram-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ram-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ram-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ram-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ram-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ram-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "rbin": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
@@ -19014,6 +20587,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -19415,6 +20991,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -19548,12 +21127,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19566,12 +21151,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -19896,7 +21487,9 @@ var awsPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
defaultKey{
Variant: fipsVariant,
}: endpoint{
@@ -19950,6 +21543,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -20032,6 +21630,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -20041,18 +21642,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20331,12 +21941,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20346,6 +21962,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -20538,6 +22157,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -20553,12 +22175,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20568,6 +22196,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -20776,6 +22407,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+ },
endpointKey{
Region: "aws-global",
}: endpoint{
@@ -21690,6 +23330,13 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "sagemaker-geospatial": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"savingsplans": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -21706,30 +23353,84 @@ var awsPartition = partition{
},
"scheduler": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -21858,6 +23559,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -22003,6 +23707,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22018,12 +23725,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22069,6 +23782,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22113,6 +23829,40 @@ var awsPartition = partition{
},
},
},
+ "securitylake": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"serverlessrepo": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -22232,6 +23982,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22241,18 +23994,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22262,6 +24024,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22443,6 +24208,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22492,33 +24260,111 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.af-south-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-east-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-3.amazonaws.com",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-south-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-3.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-4.amazonaws.com",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
@@ -22537,30 +24383,102 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-central-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-north-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-south-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-3.amazonaws.com",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.me-central-1.amazonaws.com",
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.me-south-1.amazonaws.com",
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.sa-east-1.amazonaws.com",
+ },
endpointKey{
Region: "servicediscovery",
}: endpoint{
@@ -22591,6 +24509,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
@@ -22609,6 +24533,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
@@ -22627,6 +24557,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
@@ -22645,6 +24581,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-west-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
@@ -22687,6 +24629,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22696,18 +24641,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22717,6 +24671,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22857,6 +24814,34 @@ var awsPartition = partition{
},
},
},
+ "simspaceweaver": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"sms": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -22998,6 +24983,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -23007,12 +24998,51 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+ },
},
},
"snowball": service{
@@ -23359,6 +25389,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -23507,6 +25540,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -23652,6 +25688,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -23829,8 +25868,156 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "ssm-sap": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"sso": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -23852,6 +26039,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -23885,6 +26075,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -23922,6 +26115,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -24052,6 +26248,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -24061,6 +26260,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -24082,12 +26284,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -24229,6 +26437,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -24321,6 +26532,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "aws-global",
}: endpoint{
@@ -24497,6 +26711,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -24639,6 +26856,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -24781,6 +27001,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -25342,6 +27565,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -25502,6 +27728,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -25518,12 +27759,12 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.voice-chime.us-east-1.amazonaws.com",
+ Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
- Hostname: "fips.voice-chime.us-east-1.amazonaws.com",
+ Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -25536,12 +27777,12 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.voice-chime.us-west-2.amazonaws.com",
+ Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
- Hostname: "fips.voice-chime.us-west-2.amazonaws.com",
+ Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -25622,6 +27863,31 @@ var awsPartition = partition{
},
},
},
+ "vpc-lattice": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"waf": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -25785,6 +28051,23 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "waf-regional.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -25836,6 +28119,23 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "waf-regional.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -25870,6 +28170,23 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "waf-regional.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -25904,6 +28221,23 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "waf-regional.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -26009,6 +28343,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-south-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ap-southeast-1",
}: endpoint{
@@ -26036,6 +28379,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-southeast-4",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -26054,6 +28406,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-north-1",
}: endpoint{
@@ -26072,6 +28433,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-west-1",
}: endpoint{
@@ -26099,6 +28469,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -26153,6 +28532,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -26361,6 +28757,23 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "wafv2.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -26412,6 +28825,23 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "wafv2.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -26446,6 +28876,23 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "wafv2.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -26480,6 +28927,23 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "wafv2.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -26585,6 +29049,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-south-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ap-southeast-1",
}: endpoint{
@@ -26612,6 +29085,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-southeast-4",
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -26630,6 +29112,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-north-1",
}: endpoint{
@@ -26648,6 +29139,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-west-1",
}: endpoint{
@@ -26675,6 +29175,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -26729,6 +29238,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "wafv2.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -27160,6 +29686,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -27494,9 +30023,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"autoscaling": service{
@@ -27728,7 +30269,10 @@ var awscnPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
@@ -27754,6 +30298,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"dax": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28094,14 +30648,6 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
- endpointKey{
- Region: "dataplane-cn-north-1",
- }: endpoint{
- Hostname: "greengrass.ats.iot.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
},
},
"guardduty": service{
@@ -28154,6 +30700,31 @@ var awscnPartition = partition{
},
},
},
+ "internetmonitor": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -28224,6 +30795,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"kinesis": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28244,6 +30840,13 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "kinesisvideo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
"kms": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28328,6 +30931,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -28373,6 +30986,16 @@ var awscnPartition = partition{
},
},
},
+ "oam": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"organizations": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -28453,7 +31076,9 @@ var awscnPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
defaultKey{
Variant: fipsVariant,
}: endpoint{
@@ -28484,6 +31109,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "rolesanywhere": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"route53": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -28665,6 +31300,33 @@ var awscnPartition = partition{
},
},
"servicediscovery": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn",
+ },
+ },
+ },
+ "servicequotas": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
@@ -29098,6 +31760,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -29106,6 +31786,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"acm": service{
@@ -29360,6 +32058,9 @@ var awsusgovPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -29577,6 +32278,24 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -29620,6 +32339,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@@ -29629,6 +32354,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@@ -29756,6 +32487,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cassandra.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "cassandra.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -29764,6 +32513,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cassandra.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "cassandra.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudcontrolapi": service{
@@ -29811,6 +32578,21 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudformation": service{
@@ -29823,6 +32605,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -29831,6 +32631,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudhsm": service{
@@ -30149,6 +32967,26 @@ var awsusgovPartition = partition{
},
},
},
+ "compute-optimizer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"config": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -30304,9 +33142,24 @@ var awsusgovPartition = partition{
},
"databrew": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "databrew.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "databrew.us-gov-west-1.amazonaws.com",
+ },
},
},
"datasync": service{
@@ -30374,9 +33227,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dlm.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "dlm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dlm.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "dlm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"dms": service{
@@ -30753,6 +33636,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -30761,6 +33662,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"elasticfilesystem": service{
@@ -30927,6 +33846,16 @@ var awsusgovPartition = partition{
},
},
},
+ "emr-containers": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -31198,21 +34127,43 @@ var awsusgovPartition = partition{
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "glacier.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "glacier.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "glacier.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
},
},
},
@@ -31283,36 +34234,38 @@ var awsusgovPartition = partition{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
- Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "greengrass.us-gov-east-1.amazonaws.com",
+ Hostname: "greengrass.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-gov-east-1",
+ Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "greengrass.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
},
},
},
@@ -31511,6 +34464,21 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"inspector": service{
@@ -31553,6 +34521,41 @@ var awsusgovPartition = partition{
},
},
},
+ "inspector2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "internetmonitor": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-gov-west-1.api.aws",
+ },
+ },
+ },
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -31722,6 +34725,28 @@ var awsusgovPartition = partition{
},
},
},
+ "iottwinmaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -31754,8 +34779,51 @@ var awsusgovPartition = partition{
},
},
},
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-gov-west-1.api.aws",
+ },
+ },
+ },
"kinesis": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
@@ -31764,6 +34832,15 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -31772,6 +34849,15 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"kinesisanalytics": service{
@@ -32015,12 +35101,22 @@ var awsusgovPartition = partition{
"mediaconvert": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
},
},
},
@@ -32081,6 +35177,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"models.lex": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -32335,35 +35441,75 @@ var awsusgovPartition = partition{
"outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "outposts.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "outposts.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts.us-gov-west-1.amazonaws.com",
},
},
},
"participant.connect": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
},
},
},
+ "pi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"pinpoint": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -32463,6 +35609,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -32471,6 +35635,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"rbin": service{
@@ -32650,7 +35832,9 @@ var awsusgovPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
defaultKey{
Variant: fipsVariant,
}: endpoint{
@@ -32815,6 +35999,9 @@ var awsusgovPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -33159,21 +36346,45 @@ var awsusgovPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
},
},
},
@@ -33267,6 +36478,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@@ -33285,6 +36502,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-gov-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@@ -33395,9 +36618,24 @@ var awsusgovPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
+ },
},
},
"snowball": service{
@@ -33823,6 +37061,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -33831,6 +37087,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"synthetics": service{
@@ -34679,6 +37953,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "glue": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"health": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -34800,6 +38081,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -34898,6 +38186,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"snowball": service{
@@ -35054,6 +38345,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
},
@@ -35180,6 +38474,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "dlm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"dms": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -35454,6 +38755,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Endpoints: serviceEndpoints{
endpointKey{
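The us-gov additions above mostly register fipsVariant keys next to the plain region keys. For reference, a minimal sketch of resolving one of those variants through the public endpoints resolver, assuming the upstream github.com/aws/aws-sdk-go import path rather than a vendored copy; the expected hostname comes from the mediaconvert us-gov-west-1 entry added above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Ask the default resolver for the FIPS variant of MediaConvert in us-gov-west-1.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		"mediaconvert", "us-gov-west-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		},
	)
	if err != nil {
		panic(err)
	}
	// With the table above, this should print https://mediaconvert.us-gov-west-1.amazonaws.com.
	fmt.Println(resolved.URL)
}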
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 4293dbe10bda..cbccb60bbe8e 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -174,7 +174,6 @@ const (
// Options provides the means to control how a Session is created and what
// configuration values will be loaded.
-//
type Options struct {
// Provides config values for the SDK to use when creating service clients
// and making API requests to services. Any value set in with this field
@@ -224,7 +223,7 @@ type Options struct {
// from stdin for the MFA token code.
//
// This field is only used if the shared configuration is enabled, and
- // the config enables assume role wit MFA via the mfa_serial field.
+ // the config enables assume role with MFA via the mfa_serial field.
AssumeRoleTokenProvider func() (string, error)
// When the SDK's shared config is configured to assume a role this option
@@ -322,24 +321,24 @@ type Options struct {
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
-// // Equivalent to session.New
-// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
//
-// // Specify profile to load for the session's config
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Profile: "profile_name",
-// }))
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
//
-// // Specify profile for config and region for requests
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Config: aws.Config{Region: aws.String("us-east-1")},
-// Profile: "profile_name",
-// }))
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
//
-// // Force enable Shared Config support
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// SharedConfigState: session.SharedConfigEnable,
-// }))
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
var err error
@@ -375,7 +374,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
// This helper is intended to be used in variable initialization to load the
// Session and configuration at startup. Such as:
//
-// var sess = session.Must(session.NewSession())
+// var sess = session.Must(session.NewSession())
func Must(sess *Session, err error) *Session {
if err != nil {
panic(err)
@@ -780,16 +779,6 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
}
- // Configure credentials if not already set by the user when creating the
- // Session.
- if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
- creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
- if err != nil {
- return err
- }
- cfg.Credentials = creds
- }
-
cfg.S3UseARNRegion = userCfg.S3UseARNRegion
if cfg.S3UseARNRegion == nil {
cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
@@ -812,6 +801,17 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
}
}
+ // Configure credentials if not already set by the user when creating the Session.
+ // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers.
+ // ticket: P83606045
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+ creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ if err != nil {
+ return err
+ }
+ cfg.Credentials = creds
+ }
+
return nil
}
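The moved block only runs when the SDK still holds anonymous credentials and the caller did not set any; a Config that already carries credentials bypasses resolveCredentials entirely because userCfg.Credentials is non-nil. A minimal sketch of that bypass, with placeholder static credentials:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Credentials supplied explicitly on the user Config are kept as-is;
	// mergeConfigSrcs never calls resolveCredentials in this case.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-west-2"),
		Credentials: credentials.NewStaticCredentials("AKID", "SECRET", ""), // placeholder values
	}))
	_ = sess
}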
@@ -845,8 +845,8 @@ func initHandlers(s *Session) {
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
//
-// // Create a copy of the current Session, configured for the us-west-2 region.
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 2417675c264d..3c5e4a8a3096 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.147"
+const SDKVersion = "1.44.241"
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
index c0c52e2db0f3..9c1ccde54aea 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -13,17 +13,46 @@ import (
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)
+const (
+ awsQueryError = "x-amzn-query-error"
+ // A valid header example - "x-amzn-query-error": "<qualifiedErrorCode>;<errorType>"
+ awsQueryErrorPartsCount = 2
+)
+
// UnmarshalTypedError provides unmarshaling errors API response errors
// for both typed and untyped errors.
type UnmarshalTypedError struct {
- exceptions map[string]func(protocol.ResponseMetadata) error
+ exceptions map[string]func(protocol.ResponseMetadata) error
+ queryExceptions map[string]func(protocol.ResponseMetadata, string) error
}
// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
// set of exception names to the error unmarshalers
func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
return &UnmarshalTypedError{
- exceptions: exceptions,
+ exceptions: exceptions,
+ queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
+ }
+}
+
+// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError
+// before returning it
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+ unmarshaledError := NewUnmarshalTypedError(exceptions)
+ for _, fn := range optFns {
+ fn(unmarshaledError)
+ }
+ return unmarshaledError
+}
+
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
+func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
+ return func(typedError *UnmarshalTypedError) {
+ typedError.queryExceptions = queryExceptions
}
}
@@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError(
code := codeParts[len(codeParts)-1]
msg := jsonErr.Message
+ queryCodeParts := queryCodeParts(resp, u)
+
if fn, ok := u.exceptions[code]; ok {
- // If exception code is know, use associated constructor to get a value
+ // If query-compatible exceptions are found and query-error-header is found,
+ // then use associated constructor to get exception with query error code.
+ //
+ // If exception code is known, use associated constructor to get a value
// for the exception that the JSON body can be unmarshaled into.
- v := fn(respMeta)
+ var v error
+ queryErrFn, queryExceptionsFound := u.queryExceptions[code]
+ if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound {
+ v = queryErrFn(respMeta, queryCodeParts[0])
+ } else {
+ v = fn(respMeta)
+ }
err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
if err != nil {
return nil, err
}
-
return v, nil
}
+ if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 {
+ code = queryCodeParts[0]
+ }
+
// fallback to unmodeled generic exceptions
return awserr.NewRequestFailure(
awserr.New(code, msg, nil),
@@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError(
), nil
}
+// A valid header example - "x-amzn-query-error": "<qualifiedErrorCode>;<errorType>"
+func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string {
+ queryCodeHeader := resp.Header.Get(awsQueryError)
+ var queryCodeParts []string
+ if queryCodeHeader != "" && len(u.queryExceptions) > 0 {
+ queryCodeParts = strings.Split(queryCodeHeader, ";")
+ }
+ return queryCodeParts
+}
+
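A rough sketch of how the new option could be wired by a caller; the exception name and constructors below are hypothetical placeholders rather than generated service code, and only NewUnmarshalTypedErrorWithOptions and WithQueryCompatibility are taken from the change above:

package main

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)

func newErrorUnmarshaler() *jsonrpc.UnmarshalTypedError {
	// Modeled (JSON) exception constructors, keyed by the JSON error code.
	exceptions := map[string]func(protocol.ResponseMetadata) error{
		"SomeModeledException": func(meta protocol.ResponseMetadata) error { // hypothetical name
			return awserr.NewRequestFailure(
				awserr.New("SomeModeledException", "modeled failure", nil),
				meta.StatusCode, meta.RequestID)
		},
	}
	// Query-compatible overrides: the second argument is the legacy Query error
	// code parsed from the first part of the "x-amzn-query-error" header.
	queryExceptions := map[string]func(protocol.ResponseMetadata, string) error{
		"SomeModeledException": func(meta protocol.ResponseMetadata, queryCode string) error {
			return awserr.NewRequestFailure(
				awserr.New(queryCode, "modeled failure", nil),
				meta.StatusCode, meta.RequestID)
		},
	}
	return jsonrpc.NewUnmarshalTypedErrorWithOptions(exceptions,
		jsonrpc.WithQueryCompatibility(queryExceptions))
}

func main() {
	_ = newErrorUnmarshaler()
}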
// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
// protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
index d756d8cc5296..4fffd0427bad 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -45,7 +45,7 @@ func (u *UnmarshalTypedError) UnmarshalError(
msg := resp.Header.Get(errorMessageHeader)
body := resp.Body
- if len(code) == 0 {
+ if len(code) == 0 || len(msg) == 0 {
// If unable to get code from HTTP headers have to parse JSON message
// to determine what kind of exception this will be.
var buf bytes.Buffer
@@ -57,7 +57,9 @@ func (u *UnmarshalTypedError) UnmarshalError(
}
body = ioutil.NopCloser(&buf)
- code = jsonErr.Code
+ if len(code) == 0 {
+ code = jsonErr.Code
+ }
msg = jsonErr.Message
}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
index 33c06a6589ed..b98e7076e3c9 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go
@@ -155,6 +155,12 @@ func (c *AutoScaling) AttachLoadBalancerTargetGroupsRequest(input *AttachLoadBal
// AttachLoadBalancerTargetGroups API operation for Auto Scaling.
//
+// This API operation is superseded by AttachTrafficSources, which can attach
+// multiple traffic source types. We recommend using AttachTrafficSources to
+// simplify how you manage traffic sources. However, we continue to support
+// AttachLoadBalancerTargetGroups. You can use both the original AttachLoadBalancerTargetGroups
+// API operation and AttachTrafficSources on the same Auto Scaling group.
+//
// Attaches one or more target groups to the specified Auto Scaling group.
//
// This operation is used with the following load balancer types:
@@ -260,8 +266,11 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput
// AttachLoadBalancers API operation for Auto Scaling.
//
-// To attach an Application Load Balancer, Network Load Balancer, or Gateway
-// Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.
+// This API operation is superseded by AttachTrafficSources, which can attach
+// multiple traffic source types. We recommend using AttachTrafficSources to
+// simplify how you manage traffic sources. However, we continue to support
+// AttachLoadBalancers. You can use both the original AttachLoadBalancers API
+// operation and AttachTrafficSources on the same Auto Scaling group.
//
// Attaches one or more Classic Load Balancers to the specified Auto Scaling
// group. Amazon EC2 Auto Scaling registers the running instances with these
@@ -316,6 +325,110 @@ func (c *AutoScaling) AttachLoadBalancersWithContext(ctx aws.Context, input *Att
return out, req.Send()
}
+const opAttachTrafficSources = "AttachTrafficSources"
+
+// AttachTrafficSourcesRequest generates a "aws/request.Request" representing the
+// client's request for the AttachTrafficSources operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachTrafficSources for more information on using the AttachTrafficSources
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the AttachTrafficSourcesRequest method.
+// req, resp := client.AttachTrafficSourcesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachTrafficSources
+func (c *AutoScaling) AttachTrafficSourcesRequest(input *AttachTrafficSourcesInput) (req *request.Request, output *AttachTrafficSourcesOutput) {
+ op := &request.Operation{
+ Name: opAttachTrafficSources,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachTrafficSourcesInput{}
+ }
+
+ output = &AttachTrafficSourcesOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// AttachTrafficSources API operation for Auto Scaling.
+//
+// Attaches one or more traffic sources to the specified Auto Scaling group.
+//
+// You can use any of the following as traffic sources for an Auto Scaling group:
+//
+// - Application Load Balancer
+//
+// - Classic Load Balancer
+//
+// - Gateway Load Balancer
+//
+// - Network Load Balancer
+//
+// - VPC Lattice
+//
+// This operation is additive and does not detach existing traffic sources from
+// the Auto Scaling group.
+//
+// After the operation completes, use the DescribeTrafficSources API to return
+// details about the state of the attachments between traffic sources and your
+// Auto Scaling group. To detach a traffic source from the Auto Scaling group,
+// call the DetachTrafficSources API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Auto Scaling's
+// API operation AttachTrafficSources for usage and error information.
+//
+// Returned Error Codes:
+//
+// - ErrCodeResourceContentionFault "ResourceContention"
+// You already have a pending update to an Amazon EC2 Auto Scaling resource
+// (for example, an Auto Scaling group, instance, or load balancer).
+//
+// - ErrCodeServiceLinkedRoleFailure "ServiceLinkedRoleFailure"
+// The service-linked role is not yet ready for use.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachTrafficSources
+func (c *AutoScaling) AttachTrafficSources(input *AttachTrafficSourcesInput) (*AttachTrafficSourcesOutput, error) {
+ req, out := c.AttachTrafficSourcesRequest(input)
+ return out, req.Send()
+}
+
+// AttachTrafficSourcesWithContext is the same as AttachTrafficSources with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachTrafficSources for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AutoScaling) AttachTrafficSourcesWithContext(ctx aws.Context, input *AttachTrafficSourcesInput, opts ...request.Option) (*AttachTrafficSourcesOutput, error) {
+ req, out := c.AttachTrafficSourcesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
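A minimal sketch of calling the new operation; the region, group name, and target group ARN are placeholders, and the Identifier-only TrafficSourceIdentifier form is an assumption about the request shape rather than something shown in this hunk:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := autoscaling.New(sess)

	// Attach a single target group (placeholder ARN) as a traffic source; the
	// attachment state can afterwards be inspected with DescribeTrafficSources.
	_, err := svc.AttachTrafficSources(&autoscaling.AttachTrafficSourcesInput{
		AutoScalingGroupName: aws.String("example-asg"), // placeholder group name
		TrafficSources: []*autoscaling.TrafficSourceIdentifier{
			{Identifier: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example-tg/0123456789abcdef")},
		},
	})
	if err != nil {
		fmt.Println("attach failed:", err)
	}
}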
const opBatchDeleteScheduledAction = "BatchDeleteScheduledAction"
// BatchDeleteScheduledActionRequest generates a "aws/request.Request" representing the
@@ -529,14 +642,17 @@ func (c *AutoScaling) CancelInstanceRefreshRequest(input *CancelInstanceRefreshI
// CancelInstanceRefresh API operation for Auto Scaling.
//
-// Cancels an instance refresh operation in progress. Cancellation does not
-// roll back any replacements that have already been completed, but it prevents
-// new replacements from being started.
+// Cancels an instance refresh or rollback that is in progress. If an instance
+// refresh or rollback is not in progress, an ActiveInstanceRefreshNotFound
+// error occurs.
//
// This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
// in Amazon EC2 Auto Scaling, which helps you update instances in your Auto
// Scaling group after you make configuration changes.
//
+// When you cancel an instance refresh, this does not roll back any changes
+// that it made. Use the RollbackInstanceRefresh API to roll back instead.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -557,8 +673,8 @@ func (c *AutoScaling) CancelInstanceRefreshRequest(input *CancelInstanceRefreshI
// (for example, an Auto Scaling group, instance, or load balancer).
//
// - ErrCodeActiveInstanceRefreshNotFoundFault "ActiveInstanceRefreshNotFound"
-// The request failed because an active instance refresh for the specified Auto
-// Scaling group was not found.
+// The request failed because an active instance refresh or rollback for the
+// specified Auto Scaling group was not found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CancelInstanceRefresh
func (c *AutoScaling) CancelInstanceRefresh(input *CancelInstanceRefreshInput) (*CancelInstanceRefreshOutput, error) {
@@ -2315,28 +2431,13 @@ func (c *AutoScaling) DescribeInstanceRefreshesRequest(input *DescribeInstanceRe
// in Amazon EC2 Auto Scaling, which helps you update instances in your Auto
// Scaling group after you make configuration changes.
//
-// To help you determine the status of an instance refresh, this operation returns
-// information about the instance refreshes you previously initiated, including
-// their status, end time, the percentage of the instance refresh that is complete,
-// and the number of instances remaining to update before the instance refresh
-// is complete.
-//
-// The following are the possible statuses:
-//
-// - Pending - The request was created, but the operation has not started.
-//
-// - InProgress - The operation is in progress.
-//
-// - Successful - The operation completed successfully.
-//
-// - Failed - The operation failed to complete. You can troubleshoot using
-// the status reason and the scaling activities.
-//
-// - Cancelling - An ongoing operation is being cancelled. Cancellation does
-// not roll back any replacements that have already been completed, but it
-// prevents new replacements from being started.
-//
-// - Cancelled - The operation is cancelled.
+// To help you determine the status of an instance refresh, Amazon EC2 Auto
+// Scaling returns information about the instance refreshes you previously initiated,
+// including their status, start time, end time, the percentage of the instance
+// refresh that is complete, and the number of instances remaining to update
+// before the instance refresh is complete. If a rollback is initiated while
+// an instance refresh is in progress, Amazon EC2 Auto Scaling also returns
+// information about the rollback of the instance refresh.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2724,6 +2825,12 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoa
// DescribeLoadBalancerTargetGroups API operation for Auto Scaling.
//
+// This API operation is superseded by DescribeTrafficSources, which can describe
+// multiple traffic source types. We recommend using DescribeTrafficSources to
+// simplify how you manage traffic sources. However, we continue to support
+// DescribeLoadBalancerTargetGroups. You can use both the original DescribeLoadBalancerTargetGroups
+// API operation and DescribeTrafficSources on the same Auto Scaling group.
+//
// Gets information about the Elastic Load Balancing target groups for the specified
// Auto Scaling group.
//
@@ -2750,6 +2857,10 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoa
// Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
+// You can use this operation to describe target groups that were attached by
+// using AttachLoadBalancerTargetGroups, but not for target groups that were
+// attached by using AttachTrafficSources.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2831,11 +2942,17 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI
// DescribeLoadBalancers API operation for Auto Scaling.
//
+// This API operation is superseded by DescribeTrafficSources, which can describe
+// multiple traffic source types. We recommend using DescribeTrafficSources
+// to simplify how you manage traffic sources. However, we continue to support
+// DescribeLoadBalancers. You can use both the original DescribeLoadBalancers
+// API operation and DescribeTrafficSources on the same Auto Scaling group.
+//
// Gets information about the load balancers for the specified Auto Scaling
// group.
//
// This operation describes only Classic Load Balancers. If you have Application
-// Load Balancers, Network Load Balancers, or Gateway Load Balancer, use the
+// Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the
// DescribeLoadBalancerTargetGroups API instead.
//
// To determine the attachment status of the load balancer, use the State element
@@ -3873,6 +3990,153 @@ func (c *AutoScaling) DescribeTerminationPolicyTypesWithContext(ctx aws.Context,
return out, req.Send()
}
+const opDescribeTrafficSources = "DescribeTrafficSources"
+
+// DescribeTrafficSourcesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTrafficSources operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeTrafficSources for more information on using the DescribeTrafficSources
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeTrafficSourcesRequest method.
+// req, resp := client.DescribeTrafficSourcesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTrafficSources
+func (c *AutoScaling) DescribeTrafficSourcesRequest(input *DescribeTrafficSourcesInput) (req *request.Request, output *DescribeTrafficSourcesOutput) {
+ op := &request.Operation{
+ Name: opDescribeTrafficSources,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxRecords",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeTrafficSourcesInput{}
+ }
+
+ output = &DescribeTrafficSourcesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeTrafficSources API operation for Auto Scaling.
+//
+// Gets information about the traffic sources for the specified Auto Scaling
+// group.
+//
+// You can optionally provide a traffic source type. If you provide a traffic
+// source type, then the results only include that traffic source type.
+//
+// If you do not provide a traffic source type, then the results include all
+// the traffic sources for the specified Auto Scaling group.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Auto Scaling's
+// API operation DescribeTrafficSources for usage and error information.
+//
+// Returned Error Codes:
+//
+// - ErrCodeResourceContentionFault "ResourceContention"
+// You already have a pending update to an Amazon EC2 Auto Scaling resource
+// (for example, an Auto Scaling group, instance, or load balancer).
+//
+// - ErrCodeInvalidNextToken "InvalidNextToken"
+// The NextToken value is not valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTrafficSources
+func (c *AutoScaling) DescribeTrafficSources(input *DescribeTrafficSourcesInput) (*DescribeTrafficSourcesOutput, error) {
+ req, out := c.DescribeTrafficSourcesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeTrafficSourcesWithContext is the same as DescribeTrafficSources with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeTrafficSources for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AutoScaling) DescribeTrafficSourcesWithContext(ctx aws.Context, input *DescribeTrafficSourcesInput, opts ...request.Option) (*DescribeTrafficSourcesOutput, error) {
+ req, out := c.DescribeTrafficSourcesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeTrafficSourcesPages iterates over the pages of a DescribeTrafficSources operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeTrafficSources method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeTrafficSources operation.
+// pageNum := 0
+// err := client.DescribeTrafficSourcesPages(params,
+// func(page *autoscaling.DescribeTrafficSourcesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *AutoScaling) DescribeTrafficSourcesPages(input *DescribeTrafficSourcesInput, fn func(*DescribeTrafficSourcesOutput, bool) bool) error {
+ return c.DescribeTrafficSourcesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeTrafficSourcesPagesWithContext same as DescribeTrafficSourcesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AutoScaling) DescribeTrafficSourcesPagesWithContext(ctx aws.Context, input *DescribeTrafficSourcesInput, fn func(*DescribeTrafficSourcesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeTrafficSourcesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeTrafficSourcesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeTrafficSourcesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
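A short sketch of the paginator in use; the group name is a placeholder and the TrafficSources output field is assumed from the service model rather than shown here:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := autoscaling.New(sess)

	total := 0
	err := svc.DescribeTrafficSourcesPages(
		&autoscaling.DescribeTrafficSourcesInput{
			AutoScalingGroupName: aws.String("example-asg"), // placeholder group name
		},
		func(page *autoscaling.DescribeTrafficSourcesOutput, lastPage bool) bool {
			total += len(page.TrafficSources) // count sources across pages
			return true                       // keep paging until lastPage
		},
	)
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	fmt.Println("traffic sources:", total)
}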
const opDescribeWarmPool = "DescribeWarmPool"
// DescribeWarmPoolRequest generates a "aws/request.Request" representing the
@@ -4103,6 +4367,12 @@ func (c *AutoScaling) DetachLoadBalancerTargetGroupsRequest(input *DetachLoadBal
// DetachLoadBalancerTargetGroups API operation for Auto Scaling.
//
+// This API operation is superseded by DetachTrafficSources, which can detach
+// multiple traffic source types. We recommend using DetachTrafficSources to
+// simplify how you manage traffic sources. However, we continue to support
+// DetachLoadBalancerTargetGroups. You can use both the original DetachLoadBalancerTargetGroups
+// API operation and DetachTrafficSources on the same Auto Scaling group.
+//
// Detaches one or more target groups from the specified Auto Scaling group.
//
// When you detach a target group, it enters the Removing state while deregistering
@@ -4110,6 +4380,10 @@ func (c *AutoScaling) DetachLoadBalancerTargetGroupsRequest(input *DetachLoadBal
// can no longer describe the target group using the DescribeLoadBalancerTargetGroups
// API call. The instances remain running.
//
+// You can use this operation to detach target groups that were attached by
+// using AttachLoadBalancerTargetGroups, but not for target groups that were
+// attached by using AttachTrafficSources.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4188,11 +4462,17 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput
// DetachLoadBalancers API operation for Auto Scaling.
//
+// This API operation is superseded by DetachTrafficSources, which can detach
+// multiple traffic source types. We recommend using DetachTrafficSources to
+// simplify how you manage traffic sources. However, we continue to support
+// DetachLoadBalancers. You can use both the original DetachLoadBalancers API
+// operation and DetachTrafficSources on the same Auto Scaling group.
+//
// Detaches one or more Classic Load Balancers from the specified Auto Scaling
// group.
//
// This operation detaches only Classic Load Balancers. If you have Application
-// Load Balancers, Network Load Balancers, or Gateway Load Balancer, use the
+// Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the
// DetachLoadBalancerTargetGroups API instead.
//
// When you detach a load balancer, it enters the Removing state while deregistering
@@ -4234,153 +4514,238 @@ func (c *AutoScaling) DetachLoadBalancersWithContext(ctx aws.Context, input *Det
return out, req.Send()
}
-const opDisableMetricsCollection = "DisableMetricsCollection"
+const opDetachTrafficSources = "DetachTrafficSources"
-// DisableMetricsCollectionRequest generates a "aws/request.Request" representing the
-// client's request for the DisableMetricsCollection operation. The "output" return
+// DetachTrafficSourcesRequest generates a "aws/request.Request" representing the
+// client's request for the DetachTrafficSources operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See DisableMetricsCollection for more information on using the DisableMetricsCollection
+// See DetachTrafficSources for more information on using the DetachTrafficSources
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// // Example sending a request using the DisableMetricsCollectionRequest method.
-// req, resp := client.DisableMetricsCollectionRequest(params)
+// // Example sending a request using the DetachTrafficSourcesRequest method.
+// req, resp := client.DetachTrafficSourcesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollection
-func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachTrafficSources
+func (c *AutoScaling) DetachTrafficSourcesRequest(input *DetachTrafficSourcesInput) (req *request.Request, output *DetachTrafficSourcesOutput) {
op := &request.Operation{
- Name: opDisableMetricsCollection,
+ Name: opDetachTrafficSources,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
- input = &DisableMetricsCollectionInput{}
+ input = &DetachTrafficSourcesInput{}
}
- output = &DisableMetricsCollectionOutput{}
+ output = &DetachTrafficSourcesOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
-// DisableMetricsCollection API operation for Auto Scaling.
+// DetachTrafficSources API operation for Auto Scaling.
//
-// Disables group metrics collection for the specified Auto Scaling group.
+// Detaches one or more traffic sources from the specified Auto Scaling group.
+//
+// When you detach a traffic source, it enters the Removing state while deregistering
+// the instances in the group. When all instances are deregistered, then you
+// can no longer describe the traffic source using the DescribeTrafficSources
+// API call. The instances continue to run.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Auto Scaling's
-// API operation DisableMetricsCollection for usage and error information.
+// API operation DetachTrafficSources for usage and error information.
//
// Returned Error Codes:
// - ErrCodeResourceContentionFault "ResourceContention"
// You already have a pending update to an Amazon EC2 Auto Scaling resource
// (for example, an Auto Scaling group, instance, or load balancer).
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollection
-func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) {
- req, out := c.DisableMetricsCollectionRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachTrafficSources
+func (c *AutoScaling) DetachTrafficSources(input *DetachTrafficSourcesInput) (*DetachTrafficSourcesOutput, error) {
+ req, out := c.DetachTrafficSourcesRequest(input)
return out, req.Send()
}
-// DisableMetricsCollectionWithContext is the same as DisableMetricsCollection with the addition of
+// DetachTrafficSourcesWithContext is the same as DetachTrafficSources with the addition of
// the ability to pass a context and additional request options.
//
-// See DisableMetricsCollection for details on how to use this API operation.
+// See DetachTrafficSources for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *AutoScaling) DisableMetricsCollectionWithContext(ctx aws.Context, input *DisableMetricsCollectionInput, opts ...request.Option) (*DisableMetricsCollectionOutput, error) {
- req, out := c.DisableMetricsCollectionRequest(input)
+func (c *AutoScaling) DetachTrafficSourcesWithContext(ctx aws.Context, input *DetachTrafficSourcesInput, opts ...request.Option) (*DetachTrafficSourcesOutput, error) {
+ req, out := c.DetachTrafficSourcesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
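For symmetry with the attach sketch earlier, a minimal detach sketch using the same placeholder identifiers:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := autoscaling.New(sess)

	// Detach the placeholder target group; the source passes through the
	// Removing state while its instances are deregistered.
	_, err := svc.DetachTrafficSources(&autoscaling.DetachTrafficSourcesInput{
		AutoScalingGroupName: aws.String("example-asg"),
		TrafficSources: []*autoscaling.TrafficSourceIdentifier{
			{Identifier: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example-tg/0123456789abcdef")},
		},
	})
	if err != nil {
		fmt.Println("detach failed:", err)
	}
}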
-const opEnableMetricsCollection = "EnableMetricsCollection"
+const opDisableMetricsCollection = "DisableMetricsCollection"
-// EnableMetricsCollectionRequest generates a "aws/request.Request" representing the
-// client's request for the EnableMetricsCollection operation. The "output" return
+// DisableMetricsCollectionRequest generates a "aws/request.Request" representing the
+// client's request for the DisableMetricsCollection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See EnableMetricsCollection for more information on using the EnableMetricsCollection
+// See DisableMetricsCollection for more information on using the DisableMetricsCollection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// // Example sending a request using the EnableMetricsCollectionRequest method.
-// req, resp := client.EnableMetricsCollectionRequest(params)
+// // Example sending a request using the DisableMetricsCollectionRequest method.
+// req, resp := client.DisableMetricsCollectionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollection
-func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollection
+func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) {
op := &request.Operation{
- Name: opEnableMetricsCollection,
+ Name: opDisableMetricsCollection,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
- input = &EnableMetricsCollectionInput{}
+ input = &DisableMetricsCollectionInput{}
}
- output = &EnableMetricsCollectionOutput{}
+ output = &DisableMetricsCollectionOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
-// EnableMetricsCollection API operation for Auto Scaling.
-//
-// Enables group metrics collection for the specified Auto Scaling group.
+// DisableMetricsCollection API operation for Auto Scaling.
//
-// You can use these metrics to track changes in an Auto Scaling group and to
-// set alarms on threshold values. You can view group metrics using the Amazon
-// EC2 Auto Scaling console or the CloudWatch console. For more information,
-// see Monitor CloudWatch metrics for your Auto Scaling groups and instances
-// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html)
-// in the Amazon EC2 Auto Scaling User Guide.
+// Disables group metrics collection for the specified Auto Scaling group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Auto Scaling's
-// API operation EnableMetricsCollection for usage and error information.
+// API operation DisableMetricsCollection for usage and error information.
//
// Returned Error Codes:
// - ErrCodeResourceContentionFault "ResourceContention"
// You already have a pending update to an Amazon EC2 Auto Scaling resource
// (for example, an Auto Scaling group, instance, or load balancer).
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollection
-func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollection
+func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) {
+ req, out := c.DisableMetricsCollectionRequest(input)
+ return out, req.Send()
+}
+
+// DisableMetricsCollectionWithContext is the same as DisableMetricsCollection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableMetricsCollection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AutoScaling) DisableMetricsCollectionWithContext(ctx aws.Context, input *DisableMetricsCollectionInput, opts ...request.Option) (*DisableMetricsCollectionOutput, error) {
+ req, out := c.DisableMetricsCollectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opEnableMetricsCollection = "EnableMetricsCollection"
+
+// EnableMetricsCollectionRequest generates a "aws/request.Request" representing the
+// client's request for the EnableMetricsCollection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See EnableMetricsCollection for more information on using the EnableMetricsCollection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the EnableMetricsCollectionRequest method.
+// req, resp := client.EnableMetricsCollectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollection
+func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) {
+ op := &request.Operation{
+ Name: opEnableMetricsCollection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableMetricsCollectionInput{}
+ }
+
+ output = &EnableMetricsCollectionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// EnableMetricsCollection API operation for Auto Scaling.
+//
+// Enables group metrics collection for the specified Auto Scaling group.
+//
+// You can use these metrics to track changes in an Auto Scaling group and to
+// set alarms on threshold values. You can view group metrics using the Amazon
+// EC2 Auto Scaling console or the CloudWatch console. For more information,
+// see Monitor CloudWatch metrics for your Auto Scaling groups and instances
+// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html)
+// in the Amazon EC2 Auto Scaling User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Auto Scaling's
+// API operation EnableMetricsCollection for usage and error information.
+//
+// Returned Error Codes:
+// - ErrCodeResourceContentionFault "ResourceContention"
+// You already have a pending update to an Amazon EC2 Auto Scaling resource
+// (for example, an Auto Scaling group, instance, or load balancer).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollection
+func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) {
req, out := c.EnableMetricsCollectionRequest(input)
return out, req.Send()
}
@@ -5487,6 +5852,124 @@ func (c *AutoScaling) ResumeProcessesWithContext(ctx aws.Context, input *Scaling
return out, req.Send()
}
+const opRollbackInstanceRefresh = "RollbackInstanceRefresh"
+
+// RollbackInstanceRefreshRequest generates a "aws/request.Request" representing the
+// client's request for the RollbackInstanceRefresh operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RollbackInstanceRefresh for more information on using the RollbackInstanceRefresh
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the RollbackInstanceRefreshRequest method.
+// req, resp := client.RollbackInstanceRefreshRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/RollbackInstanceRefresh
+func (c *AutoScaling) RollbackInstanceRefreshRequest(input *RollbackInstanceRefreshInput) (req *request.Request, output *RollbackInstanceRefreshOutput) {
+ op := &request.Operation{
+ Name: opRollbackInstanceRefresh,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RollbackInstanceRefreshInput{}
+ }
+
+ output = &RollbackInstanceRefreshOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RollbackInstanceRefresh API operation for Auto Scaling.
+//
+// Cancels an instance refresh that is in progress and rolls back any changes
+// that it made. Amazon EC2 Auto Scaling replaces any instances that were replaced
+// during the instance refresh. This restores your Auto Scaling group to the
+// configuration that it was using before the start of the instance refresh.
+//
+// This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
+// in Amazon EC2 Auto Scaling, which helps you update instances in your Auto
+// Scaling group after you make configuration changes.
+//
+// A rollback is not supported in the following situations:
+//
+// - There is no desired configuration specified for the instance refresh.
+//
+// - The Auto Scaling group has a launch template that uses an Amazon Web
+// Services Systems Manager parameter instead of an AMI ID for the ImageId
+// property.
+//
+// - The Auto Scaling group uses the launch template's $Latest or $Default
+// version.
+//
+// When you receive a successful response from this operation, Amazon EC2 Auto
+// Scaling immediately begins replacing instances. You can check the status
+// of this operation through the DescribeInstanceRefreshes API operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Auto Scaling's
+// API operation RollbackInstanceRefresh for usage and error information.
+//
+// Returned Error Codes:
+//
+// - ErrCodeLimitExceededFault "LimitExceeded"
+// You have already reached a limit for your Amazon EC2 Auto Scaling resources
+// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks).
+// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html)
+// in the Amazon EC2 Auto Scaling API Reference.
+//
+// - ErrCodeResourceContentionFault "ResourceContention"
+// You already have a pending update to an Amazon EC2 Auto Scaling resource
+// (for example, an Auto Scaling group, instance, or load balancer).
+//
+// - ErrCodeActiveInstanceRefreshNotFoundFault "ActiveInstanceRefreshNotFound"
+// The request failed because an active instance refresh or rollback for the
+// specified Auto Scaling group was not found.
+//
+// - ErrCodeIrreversibleInstanceRefreshFault "IrreversibleInstanceRefresh"
+// The request failed because a desired configuration was not found or an incompatible
+// launch template (uses a Systems Manager parameter instead of an AMI ID) or
+// launch template version ($Latest or $Default) is present on the Auto Scaling
+// group.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/RollbackInstanceRefresh
+func (c *AutoScaling) RollbackInstanceRefresh(input *RollbackInstanceRefreshInput) (*RollbackInstanceRefreshOutput, error) {
+ req, out := c.RollbackInstanceRefreshRequest(input)
+ return out, req.Send()
+}
+
+// RollbackInstanceRefreshWithContext is the same as RollbackInstanceRefresh with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RollbackInstanceRefresh for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *AutoScaling) RollbackInstanceRefreshWithContext(ctx aws.Context, input *RollbackInstanceRefreshInput, opts ...request.Option) (*RollbackInstanceRefreshOutput, error) {
+ req, out := c.RollbackInstanceRefreshRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
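+// Illustrative usage sketch (not part of the generated SDK): rolling back an
+// in-progress instance refresh from a caller's point of view. The group name
+// "my-asg" is a placeholder, and the service import path is assumed to follow
+// the vendored aws-sdk-go layout used elsewhere in this change.
+//
+//	import (
+//		"fmt"
+//		"log"
+//
+//		"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
+//		"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
+//		"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/autoscaling"
+//	)
+//
+//	func rollbackRefresh() {
+//		// Build a client from the default credential/region chain.
+//		svc := autoscaling.New(session.Must(session.NewSession()))
+//		out, err := svc.RollbackInstanceRefresh(&autoscaling.RollbackInstanceRefreshInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//		})
+//		if err != nil {
+//			log.Fatalf("rollback failed: %v", err)
+//		}
+//		// The returned ID identifies the refresh now being rolled back.
+//		fmt.Println(aws.StringValue(out.InstanceRefreshId))
+//	}
+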
const opSetDesiredCapacity = "SetDesiredCapacity"
// SetDesiredCapacityRequest generates a "aws/request.Request" representing the
@@ -5801,9 +6284,9 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp
// StartInstanceRefresh API operation for Auto Scaling.
//
-// Starts a new instance refresh operation. An instance refresh performs a rolling
-// replacement of all or some instances in an Auto Scaling group. Each instance
-// is terminated first and then replaced, which temporarily reduces the capacity
+// Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling
+// performs a rolling update of instances in an Auto Scaling group. Instances
+// are terminated first and then replaced, which temporarily reduces the capacity
// available within your Auto Scaling group.
//
// This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
@@ -5813,11 +6296,23 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp
// that specifies the new AMI or user data script. Then start an instance refresh
// to immediately begin the process of updating instances in the group.
//
-// If the call succeeds, it creates a new instance refresh request with a unique
-// ID that you can use to track its progress. To query its status, call the
-// DescribeInstanceRefreshes API. To describe the instance refreshes that have
-// already run, call the DescribeInstanceRefreshes API. To cancel an instance
-// refresh operation in progress, use the CancelInstanceRefresh API.
+// If successful, the request's response contains a unique ID that you can use
+// to track the progress of the instance refresh. To query its status, call
+// the DescribeInstanceRefreshes API. To describe the instance refreshes that
+// have already run, call the DescribeInstanceRefreshes API. To cancel an instance
+// refresh that is in progress, use the CancelInstanceRefresh API.
+//
+// An instance refresh might fail for several reasons, such as EC2 launch failures,
+// misconfigured health checks, or not ignoring or allowing the termination
+// of instances that are in Standby state or protected from scale in. You can
+// monitor for failed EC2 launches using the scaling activities. To find the
+// scaling activities, call the DescribeScalingActivities API.
+//
+// If you enable auto rollback, your Auto Scaling group will be rolled back
+// automatically when the instance refresh fails. You can enable this feature
+// before starting an instance refresh by specifying the AutoRollback property
+// in the instance refresh preferences. Otherwise, to roll back an instance
+// refresh before it finishes, use the RollbackInstanceRefresh API.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5839,8 +6334,8 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp
// (for example, an Auto Scaling group, instance, or load balancer).
//
// - ErrCodeInstanceRefreshInProgressFault "InstanceRefreshInProgress"
-// The request failed because an active instance refresh operation already exists
-// for the specified Auto Scaling group.
+// The request failed because an active instance refresh already exists for
+// the specified Auto Scaling group.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/StartInstanceRefresh
func (c *AutoScaling) StartInstanceRefresh(input *StartInstanceRefreshInput) (*StartInstanceRefreshOutput, error) {
@@ -6574,7 +7069,7 @@ type AttachLoadBalancerTargetGroupsInput struct {
// AutoScalingGroupName is a required field
AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
- // The Amazon Resource Names (ARN) of the target groups. You can specify up
+ // The Amazon Resource Names (ARNs) of the target groups. You can specify up
// to 10 target groups. To get the ARN of a target group, use the Elastic Load
// Balancing DescribeTargetGroups (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html)
// API operation.
@@ -6739,6 +7234,102 @@ func (s AttachLoadBalancersOutput) GoString() string {
return s.String()
}
+type AttachTrafficSourcesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group.
+ //
+ // AutoScalingGroupName is a required field
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+ // The unique identifiers of one or more traffic sources. You can specify up
+ // to 10 traffic sources.
+ //
+ // TrafficSources is a required field
+ TrafficSources []*TrafficSourceIdentifier `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachTrafficSourcesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachTrafficSourcesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachTrafficSourcesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachTrafficSourcesInput"}
+ if s.AutoScalingGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName"))
+ }
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+ if s.TrafficSources == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrafficSources"))
+ }
+ if s.TrafficSources != nil {
+ for i, v := range s.TrafficSources {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TrafficSources", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAutoScalingGroupName sets the AutoScalingGroupName field's value.
+func (s *AttachTrafficSourcesInput) SetAutoScalingGroupName(v string) *AttachTrafficSourcesInput {
+ s.AutoScalingGroupName = &v
+ return s
+}
+
+// SetTrafficSources sets the TrafficSources field's value.
+func (s *AttachTrafficSourcesInput) SetTrafficSources(v []*TrafficSourceIdentifier) *AttachTrafficSourcesInput {
+ s.TrafficSources = v
+ return s
+}
+
+type AttachTrafficSourcesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachTrafficSourcesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachTrafficSourcesOutput) GoString() string {
+ return s.String()
+}
+
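+// Illustrative sketch (not generated SDK code) of the AttachTrafficSources
+// operation that pairs with these input/output types: attaching a VPC Lattice
+// target group as a traffic source. The group name, the target group ARN, and
+// the Identifier/Type fields assumed on TrafficSourceIdentifier are example
+// assumptions; svc is an *autoscaling.AutoScaling client as in the earlier
+// RollbackInstanceRefresh sketch.
+//
+//	func attachLatticeSource(svc *autoscaling.AutoScaling) error {
+//		_, err := svc.AttachTrafficSources(&autoscaling.AttachTrafficSourcesInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			TrafficSources: []*autoscaling.TrafficSourceIdentifier{{
+//				// Identifier is the ARN of the traffic source (here, a
+//				// VPC Lattice target group); Type selects the source kind.
+//				Identifier: aws.String("arn:aws:vpc-lattice:us-east-1:123456789012:targetgroup/tg-0123456789abcdef0"),
+//				Type:       aws.String("vpc-lattice"),
+//			}},
+//		})
+//		return err
+//	}
+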
// Specifies the minimum and maximum for the BaselineEbsBandwidthMbps object
// when you specify InstanceRequirements for an Auto Scaling group.
type BaselineEbsBandwidthMbpsRequest struct {
@@ -7131,7 +7722,8 @@ func (s *CancelInstanceRefreshInput) SetAutoScalingGroupName(v string) *CancelIn
type CancelInstanceRefreshOutput struct {
_ struct{} `type:"structure"`
- // The instance refresh ID.
+ // The instance refresh ID associated with the request. This is the unique ID
+ // assigned to the instance refresh when it was started.
InstanceRefreshId *string `min:"1" type:"string"`
}
@@ -7377,23 +7969,24 @@ type CreateAutoScalingGroupInput struct {
// Default: 300 seconds
DefaultCooldown *int64 `type:"integer"`
- // The amount of time, in seconds, until a newly launched instance can contribute
- // to the Amazon CloudWatch metrics. This delay lets an instance finish initializing
- // before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in
- // more reliable usage data. Set this value equal to the amount of time that
- // it takes for resource consumption to become stable after an instance reaches
- // the InService state. For more information, see Set the default instance warmup
- // for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html)
+	// The amount of time, in seconds, until a new instance is considered to have
+	// finished initializing and for resource consumption to become stable after
+	// it enters the InService state.
+ //
+ // During an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up
+ // period after it replaces an instance before it moves on to replacing the
+ // next instance. Amazon EC2 Auto Scaling also waits for the warm-up period
+ // before aggregating the metrics for new instances with existing instances
+ // in the Amazon CloudWatch metrics that are used for scaling, resulting in
+ // more reliable usage data. For more information, see Set the default instance
+ // warmup for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
- // To manage your warm-up settings at the group level, we recommend that you
- // set the default instance warmup, even if its value is set to 0 seconds. This
- // also optimizes the performance of scaling policies that scale continuously,
- // such as target tracking and step scaling policies.
- //
- // If you need to remove a value that you previously set, include the property
- // but specify -1 for the value. However, we strongly recommend keeping the
- // default instance warmup enabled by specifying a minimum value of 0.
+ // To manage various warm-up settings at the group level, we recommend that
+ // you set the default instance warmup, even if it is set to 0 seconds. To remove
+ // a value that you previously set, include the property but specify -1 for
+ // the value. However, we strongly recommend keeping the default instance warmup
+ // enabled by specifying a value of 0 or other nominal value.
//
// Default: None
DefaultInstanceWarmup *int64 `type:"integer"`
@@ -7420,21 +8013,23 @@ type CreateAutoScalingGroupInput struct {
// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before
// checking the health status of an EC2 instance that has come into service
- // and marking it unhealthy due to a failed Elastic Load Balancing or custom
- // health check. This is useful if your instances do not immediately pass these
- // health checks after they enter the InService state. For more information,
- // see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
+ // and marking it unhealthy due to a failed health check. This is useful if
+ // your instances do not immediately pass their health checks after they enter
+ // the InService state. For more information, see Set the health check grace
+ // period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
// Default: 0 seconds
HealthCheckGracePeriod *int64 `type:"integer"`
- // The service to use for the health checks. The valid values are EC2 (default)
- // and ELB. If you configure an Auto Scaling group to use load balancer (ELB)
- // health checks, it considers the instance unhealthy if it fails either the
- // EC2 status checks or the load balancer health checks. For more information,
- // see Health checks for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
+ // A comma-separated value string of one or more health check types.
+ //
+ // The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health
+ // check and cannot be disabled. For more information, see Health checks for
+ // Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
// in the Amazon EC2 Auto Scaling User Guide.
+ //
+ // Only specify EC2 if you must clear a value that was previously set.
HealthCheckType *string `min:"1" type:"string"`
// The ID of the instance used to base the launch configuration on. If specified,
@@ -7472,7 +8067,7 @@ type CreateAutoScalingGroupInput struct {
// A list of Classic Load Balancers associated with this Auto Scaling group.
// For Application Load Balancers, Network Load Balancers, and Gateway Load
- // Balancer, specify the TargetGroupARNs property instead.
+ // Balancers, specify the TargetGroupARNs property instead.
LoadBalancerNames []*string `type:"list"`
// The maximum amount of time, in seconds, that an instance can be in service.
@@ -7538,11 +8133,12 @@ type CreateAutoScalingGroupInput struct {
// in the Amazon EC2 Auto Scaling User Guide.
Tags []*Tag `type:"list"`
- // The Amazon Resource Names (ARN) of the target groups to associate with the
- // Auto Scaling group. Instances are registered as targets with the target groups.
- // The target groups receive incoming traffic and route requests to one or more
- // registered targets. For more information, see Use Elastic Load Balancing
- // to distribute traffic across the instances in your Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)
+ // The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups
+ // to associate with the Auto Scaling group. Instances are registered as targets
+ // with the target groups. The target groups receive incoming traffic and route
+ // requests to one or more registered targets. For more information, see Use
+ // Elastic Load Balancing to distribute traffic across the instances in your
+ // Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)
// in the Amazon EC2 Auto Scaling User Guide.
TargetGroupARNs []*string `type:"list"`
@@ -7556,6 +8152,12 @@ type CreateAutoScalingGroupInput struct {
// | arn:aws:lambda:region:account-id:function:my-function:my-alias
TerminationPolicies []*string `type:"list"`
+ // The list of traffic sources to attach to this Auto Scaling group. You can
+ // use any of the following as traffic sources for an Auto Scaling group: Classic
+ // Load Balancer, Application Load Balancer, Gateway Load Balancer, Network
+ // Load Balancer, and VPC Lattice.
+ TrafficSources []*TrafficSourceIdentifier `type:"list"`
+
// A comma-separated list of subnet IDs for a virtual private cloud (VPC) where
// instances in the Auto Scaling group can be created. If you specify VPCZoneIdentifier
// with AvailabilityZones, the subnets that you specify must reside in those
@@ -7647,6 +8249,16 @@ func (s *CreateAutoScalingGroupInput) Validate() error {
}
}
}
+ if s.TrafficSources != nil {
+ for i, v := range s.TrafficSources {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TrafficSources", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -7804,8 +8416,14 @@ func (s *CreateAutoScalingGroupInput) SetTerminationPolicies(v []*string) *Creat
return s
}
-// SetVPCZoneIdentifier sets the VPCZoneIdentifier field's value.
-func (s *CreateAutoScalingGroupInput) SetVPCZoneIdentifier(v string) *CreateAutoScalingGroupInput {
+// SetTrafficSources sets the TrafficSources field's value.
+func (s *CreateAutoScalingGroupInput) SetTrafficSources(v []*TrafficSourceIdentifier) *CreateAutoScalingGroupInput {
+ s.TrafficSources = v
+ return s
+}
+
+// SetVPCZoneIdentifier sets the VPCZoneIdentifier field's value.
+func (s *CreateAutoScalingGroupInput) SetVPCZoneIdentifier(v string) *CreateAutoScalingGroupInput {
s.VPCZoneIdentifier = &v
return s
}
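+
+// Minimal creation sketch (placeholder values, not generated SDK code): a group
+// can be created with traffic sources attached up front via the new
+// TrafficSources property. The launch template name, subnets, and target group
+// ARN below are assumptions for the example; svc is an *autoscaling.AutoScaling
+// client as in the earlier sketches.
+//
+//	func createGroupWithTrafficSource(svc *autoscaling.AutoScaling) error {
+//		_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			MinSize:              aws.Int64(1),
+//			MaxSize:              aws.Int64(3),
+//			LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
+//				LaunchTemplateName: aws.String("my-template"),
+//				// A pinned version keeps the group eligible for instance
+//				// refresh rollback (see the rollback caveats above).
+//				Version: aws.String("1"),
+//			},
+//			VPCZoneIdentifier: aws.String("subnet-0a1b2c3d,subnet-4e5f6a7b"),
+//			TrafficSources: []*autoscaling.TrafficSourceIdentifier{{
+//				Identifier: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
+//				Type:       aws.String("elbv2"),
+//			}},
+//		})
+//		return err
+//	}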
@@ -8321,19 +8939,17 @@ type CustomizedMetricSpecification struct {
// The name of the metric. To get the exact metric name, namespace, and dimensions,
// inspect the Metric (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Metric.html)
// object that is returned by a call to ListMetrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html).
- //
- // MetricName is a required field
- MetricName *string `type:"string" required:"true"`
+ MetricName *string `type:"string"`
+
+ // The metrics to include in the target tracking scaling policy, as a metric
+ // data query. This can include both raw metric and metric math expressions.
+ Metrics []*TargetTrackingMetricDataQuery `type:"list"`
// The namespace of the metric.
- //
- // Namespace is a required field
- Namespace *string `type:"string" required:"true"`
+ Namespace *string `type:"string"`
// The statistic of the metric.
- //
- // Statistic is a required field
- Statistic *string `type:"string" required:"true" enum:"MetricStatistic"`
+ Statistic *string `type:"string" enum:"MetricStatistic"`
// The unit of the metric. For a complete list of the units that CloudWatch
// supports, see the MetricDatum (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
@@ -8362,15 +8978,6 @@ func (s CustomizedMetricSpecification) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CustomizedMetricSpecification) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CustomizedMetricSpecification"}
- if s.MetricName == nil {
- invalidParams.Add(request.NewErrParamRequired("MetricName"))
- }
- if s.Namespace == nil {
- invalidParams.Add(request.NewErrParamRequired("Namespace"))
- }
- if s.Statistic == nil {
- invalidParams.Add(request.NewErrParamRequired("Statistic"))
- }
if s.Dimensions != nil {
for i, v := range s.Dimensions {
if v == nil {
@@ -8381,6 +8988,16 @@ func (s *CustomizedMetricSpecification) Validate() error {
}
}
}
+ if s.Metrics != nil {
+ for i, v := range s.Metrics {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metrics", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -8400,6 +9017,12 @@ func (s *CustomizedMetricSpecification) SetMetricName(v string) *CustomizedMetri
return s
}
+// SetMetrics sets the Metrics field's value.
+func (s *CustomizedMetricSpecification) SetMetrics(v []*TargetTrackingMetricDataQuery) *CustomizedMetricSpecification {
+ s.Metrics = v
+ return s
+}
+
// SetNamespace sets the Namespace field's value.
func (s *CustomizedMetricSpecification) SetNamespace(v string) *CustomizedMetricSpecification {
s.Namespace = &v
@@ -10827,6 +11450,139 @@ func (s *DescribeTerminationPolicyTypesOutput) SetTerminationPolicyTypes(v []*st
return s
}
+type DescribeTrafficSourcesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group.
+ //
+ // AutoScalingGroupName is a required field
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+ // The maximum number of items to return with this call. The maximum value is
+ // 50.
+ MaxRecords *int64 `type:"integer"`
+
+ // The token for the next set of items to return. (You received this token from
+ // a previous call.)
+ NextToken *string `type:"string"`
+
+ // The traffic source type that you want to describe.
+ //
+ // The following lists the valid values:
+ //
+ // * elb if the traffic source is a Classic Load Balancer.
+ //
+	// * elbv2 if the traffic source is an Application Load Balancer, Gateway
+ // Load Balancer, or Network Load Balancer.
+ //
+ // * vpc-lattice if the traffic source is VPC Lattice.
+ TrafficSourceType *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeTrafficSourcesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeTrafficSourcesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTrafficSourcesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficSourcesInput"}
+ if s.AutoScalingGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName"))
+ }
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+ if s.TrafficSourceType != nil && len(*s.TrafficSourceType) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TrafficSourceType", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAutoScalingGroupName sets the AutoScalingGroupName field's value.
+func (s *DescribeTrafficSourcesInput) SetAutoScalingGroupName(v string) *DescribeTrafficSourcesInput {
+ s.AutoScalingGroupName = &v
+ return s
+}
+
+// SetMaxRecords sets the MaxRecords field's value.
+func (s *DescribeTrafficSourcesInput) SetMaxRecords(v int64) *DescribeTrafficSourcesInput {
+ s.MaxRecords = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeTrafficSourcesInput) SetNextToken(v string) *DescribeTrafficSourcesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetTrafficSourceType sets the TrafficSourceType field's value.
+func (s *DescribeTrafficSourcesInput) SetTrafficSourceType(v string) *DescribeTrafficSourcesInput {
+ s.TrafficSourceType = &v
+ return s
+}
+
+type DescribeTrafficSourcesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // This string indicates that the response contains more items than can be returned
+ // in a single response. To receive additional items, specify this string for
+ // the NextToken value when requesting the next set of items. This value is
+ // null when there are no more items to return.
+ NextToken *string `type:"string"`
+
+ // Information about the traffic sources.
+ TrafficSources []*TrafficSourceState `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeTrafficSourcesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeTrafficSourcesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeTrafficSourcesOutput) SetNextToken(v string) *DescribeTrafficSourcesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetTrafficSources sets the TrafficSources field's value.
+func (s *DescribeTrafficSourcesOutput) SetTrafficSources(v []*TrafficSourceState) *DescribeTrafficSourcesOutput {
+ s.TrafficSources = v
+ return s
+}
+
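+// Pagination sketch (not generated SDK code): DescribeTrafficSources returns at
+// most MaxRecords items per call, so a caller follows NextToken until it is
+// nil. The group name is a placeholder; svc is an *autoscaling.AutoScaling
+// client as in the earlier sketches.
+//
+//	func listTrafficSources(svc *autoscaling.AutoScaling) error {
+//		input := &autoscaling.DescribeTrafficSourcesInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			MaxRecords:           aws.Int64(10),
+//		}
+//		for {
+//			page, err := svc.DescribeTrafficSources(input)
+//			if err != nil {
+//				return err
+//			}
+//			for _, ts := range page.TrafficSources {
+//				fmt.Println(ts.String())
+//			}
+//			// A nil NextToken means there are no more items to return.
+//			if page.NextToken == nil {
+//				return nil
+//			}
+//			input.NextToken = page.NextToken
+//		}
+//	}
+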
type DescribeWarmPoolInput struct {
_ struct{} `type:"structure"`
@@ -10902,8 +11658,10 @@ type DescribeWarmPoolOutput struct {
// The instances that are currently in the warm pool.
Instances []*Instance `type:"list"`
- // The token for the next set of items to return. (You received this token from
- // a previous call.)
+ // This string indicates that the response contains more items than can be returned
+ // in a single response. To receive additional items, specify this string for
+ // the NextToken value when requesting the next set of items. This value is
+ // null when there are no more items to return.
NextToken *string `type:"string"`
// The warm pool configuration details.
@@ -11295,6 +12053,102 @@ func (s DetachLoadBalancersOutput) GoString() string {
return s.String()
}
+type DetachTrafficSourcesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group.
+ //
+ // AutoScalingGroupName is a required field
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
+
+ // The unique identifiers of one or more traffic sources. You can specify up
+ // to 10 traffic sources.
+ //
+ // TrafficSources is a required field
+ TrafficSources []*TrafficSourceIdentifier `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachTrafficSourcesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachTrafficSourcesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachTrafficSourcesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachTrafficSourcesInput"}
+ if s.AutoScalingGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName"))
+ }
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+ if s.TrafficSources == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrafficSources"))
+ }
+ if s.TrafficSources != nil {
+ for i, v := range s.TrafficSources {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TrafficSources", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAutoScalingGroupName sets the AutoScalingGroupName field's value.
+func (s *DetachTrafficSourcesInput) SetAutoScalingGroupName(v string) *DetachTrafficSourcesInput {
+ s.AutoScalingGroupName = &v
+ return s
+}
+
+// SetTrafficSources sets the TrafficSources field's value.
+func (s *DetachTrafficSourcesInput) SetTrafficSources(v []*TrafficSourceIdentifier) *DetachTrafficSourcesInput {
+ s.TrafficSources = v
+ return s
+}
+
+type DetachTrafficSourcesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachTrafficSourcesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachTrafficSourcesOutput) GoString() string {
+ return s.String()
+}
+
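+// Detach sketch (not generated SDK code): detaching uses the same identifier
+// that was supplied on attach. The ARN argument is a placeholder, and the
+// TrafficSourceIdentifier fields are the ones assumed in the attach sketch
+// earlier.
+//
+//	func detachTrafficSource(svc *autoscaling.AutoScaling, arn string) error {
+//		_, err := svc.DetachTrafficSources(&autoscaling.DetachTrafficSourcesInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			TrafficSources: []*autoscaling.TrafficSourceIdentifier{{
+//				Identifier: aws.String(arn),
+//			}},
+//		})
+//		return err
+//	}
+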
type DisableMetricsCollectionInput struct {
_ struct{} `type:"structure"`
@@ -12485,10 +13339,7 @@ type Group struct {
// The duration of the health check grace period, in seconds.
HealthCheckGracePeriod *int64 `type:"integer"`
- // The service to use for the health checks. The valid values are EC2 and ELB.
- // If you configure an Auto Scaling group to use ELB health checks, it considers
- // the instance unhealthy if it fails either the EC2 status checks or the load
- // balancer health checks.
+ // A comma-separated value string of one or more health check types.
//
// HealthCheckType is a required field
HealthCheckType *string `min:"1" type:"string" required:"true"`
@@ -12553,6 +13404,9 @@ type Group struct {
// The termination policies for the group.
TerminationPolicies []*string `type:"list"`
+ // The traffic sources associated with this Auto Scaling group.
+ TrafficSources []*TrafficSourceIdentifier `type:"list"`
+
// One or more subnet IDs, if applicable, separated by commas.
VPCZoneIdentifier *string `min:"1" type:"string"`
@@ -12761,6 +13615,12 @@ func (s *Group) SetTerminationPolicies(v []*string) *Group {
return s
}
+// SetTrafficSources sets the TrafficSources field's value.
+func (s *Group) SetTrafficSources(v []*TrafficSourceIdentifier) *Group {
+ s.TrafficSources = v
+ return s
+}
+
// SetVPCZoneIdentifier sets the VPCZoneIdentifier field's value.
func (s *Group) SetVPCZoneIdentifier(v string) *Group {
s.VPCZoneIdentifier = &v
@@ -12788,10 +13648,10 @@ type Instance struct {
// AvailabilityZone is a required field
AvailabilityZone *string `min:"1" type:"string" required:"true"`
- // The last reported health status of the instance. "Healthy" means that the
- // instance is healthy and should remain in service. "Unhealthy" means that
- // the instance is unhealthy and that Amazon EC2 Auto Scaling should terminate
- // and replace it.
+ // The last reported health status of the instance. Healthy means that the instance
+ // is healthy and should remain in service. Unhealthy means that the instance
+ // is unhealthy and that Amazon EC2 Auto Scaling should terminate and replace
+ // it.
//
// HealthStatus is a required field
HealthStatus *string `min:"1" type:"string" required:"true"`
@@ -12916,10 +13776,10 @@ type InstanceDetails struct {
// AvailabilityZone is a required field
AvailabilityZone *string `min:"1" type:"string" required:"true"`
- // The last reported health status of this instance. "Healthy" means that the
- // instance is healthy and should remain in service. "Unhealthy" means that
- // the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and
- // replace it.
+ // The last reported health status of this instance. Healthy means that the
+ // instance is healthy and should remain in service. Unhealthy means that the
+ // instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace
+ // it.
//
// HealthStatus is a required field
HealthStatus *string `min:"1" type:"string" required:"true"`
@@ -13166,7 +14026,7 @@ type InstanceRefresh struct {
// The name of the Auto Scaling group.
AutoScalingGroupName *string `min:"1" type:"string"`
- // Describes the specific update you want to deploy.
+ // Describes the desired configuration for the instance refresh.
DesiredConfiguration *DesiredConfiguration `type:"structure"`
// The date and time at which the instance refresh ended.
@@ -13177,6 +14037,10 @@ type InstanceRefresh struct {
// The number of instances remaining to update before the instance refresh is
// complete.
+ //
+ // If you roll back the instance refresh, InstancesToUpdate shows you the number
+ // of instances that were not yet updated by the instance refresh. Therefore,
+ // these instances don't need to be replaced as part of the rollback.
InstancesToUpdate *int64 `type:"integer"`
// The percentage of the instance refresh that is complete. For each instance
@@ -13184,36 +14048,48 @@ type InstanceRefresh struct {
// and warm-up time. When the instance's health status changes to healthy and
// the specified warm-up time passes, the instance is considered updated and
// is added to the percentage complete.
+ //
+ // PercentageComplete does not include instances that are replaced during a
+ // rollback. This value gradually goes back down to zero during a rollback.
PercentageComplete *int64 `type:"integer"`
- // Describes the preferences for an instance refresh.
+ // The preferences for an instance refresh.
Preferences *RefreshPreferences `type:"structure"`
// Additional progress details for an Auto Scaling group that has a warm pool.
ProgressDetails *InstanceRefreshProgressDetails `type:"structure"`
+ // The rollback details.
+ RollbackDetails *RollbackDetails `type:"structure"`
+
// The date and time at which the instance refresh began.
StartTime *time.Time `type:"timestamp"`
// The current status for the instance refresh operation:
//
- // * Pending - The request was created, but the operation has not started.
+ // * Pending - The request was created, but the instance refresh has not
+ // started.
+ //
+ // * InProgress - An instance refresh is in progress.
//
- // * InProgress - The operation is in progress.
+ // * Successful - An instance refresh completed successfully.
//
- // * Successful - The operation completed successfully.
+ // * Failed - An instance refresh failed to complete. You can troubleshoot
+ // using the status reason and the scaling activities.
//
- // * Failed - The operation failed to complete. You can troubleshoot using
- // the status reason and the scaling activities.
+ // * Cancelling - An ongoing instance refresh is being cancelled.
//
- // * Cancelling - An ongoing operation is being cancelled. Cancellation does
- // not roll back any replacements that have already been completed, but it
- // prevents new replacements from being started.
+ // * Cancelled - The instance refresh is cancelled.
//
- // * Cancelled - The operation is cancelled.
+ // * RollbackInProgress - An instance refresh is being rolled back.
+ //
+ // * RollbackFailed - The rollback failed to complete. You can troubleshoot
+ // using the status reason and the scaling activities.
+ //
+ // * RollbackSuccessful - The rollback completed successfully.
Status *string `type:"string" enum:"InstanceRefreshStatus"`
- // Provides more details about the current status of the instance refresh.
+ // The explanation for the specific status assigned to this operation.
StatusReason *string `min:"1" type:"string"`
}
@@ -13283,6 +14159,12 @@ func (s *InstanceRefresh) SetProgressDetails(v *InstanceRefreshProgressDetails)
return s
}
+// SetRollbackDetails sets the RollbackDetails field's value.
+func (s *InstanceRefresh) SetRollbackDetails(v *RollbackDetails) *InstanceRefresh {
+ s.RollbackDetails = v
+ return s
+}
+
// SetStartTime sets the StartTime field's value.
func (s *InstanceRefresh) SetStartTime(v time.Time) *InstanceRefresh {
s.StartTime = &v
@@ -13301,8 +14183,7 @@ func (s *InstanceRefresh) SetStatusReason(v string) *InstanceRefresh {
return s
}
-// Reports the progress of an instance refresh on instances that are in the
-// Auto Scaling group.
+// Reports progress on replacing instances that are in the Auto Scaling group.
type InstanceRefreshLivePoolProgress struct {
_ struct{} `type:"structure"`
@@ -13347,18 +14228,16 @@ func (s *InstanceRefreshLivePoolProgress) SetPercentageComplete(v int64) *Instan
return s
}
-// Reports the progress of an instance refresh on an Auto Scaling group that
-// has a warm pool. This includes separate details for instances in the warm
-// pool and instances in the Auto Scaling group (the live pool).
+// Reports progress on replacing instances in an Auto Scaling group that has
+// a warm pool. This includes separate details for instances in the warm pool
+// and instances in the Auto Scaling group (the live pool).
type InstanceRefreshProgressDetails struct {
_ struct{} `type:"structure"`
- // Indicates the progress of an instance refresh on instances that are in the
- // Auto Scaling group.
+ // Reports progress on replacing instances that are in the Auto Scaling group.
LivePoolProgress *InstanceRefreshLivePoolProgress `type:"structure"`
- // Indicates the progress of an instance refresh on instances that are in the
- // warm pool.
+ // Reports progress on replacing instances that are in the warm pool.
WarmPoolProgress *InstanceRefreshWarmPoolProgress `type:"structure"`
}
@@ -13392,8 +14271,7 @@ func (s *InstanceRefreshProgressDetails) SetWarmPoolProgress(v *InstanceRefreshW
return s
}
-// Reports the progress of an instance refresh on instances that are in the
-// warm pool.
+// Reports progress on replacing instances that are in the warm pool.
type InstanceRefreshWarmPoolProgress struct {
_ struct{} `type:"structure"`
@@ -13995,11 +14873,9 @@ type InstancesDistribution struct {
//
// price-capacity-optimized (recommended)
//
- // Amazon EC2 Auto Scaling identifies the pools with the highest capacity availability
- // for the number of instances that are launching. This means that we will request
- // Spot Instances from the pools that we believe have the lowest chance of interruption
- // in the near term. Amazon EC2 Auto Scaling then requests Spot Instances from
- // the lowest priced of these pools.
+ // The price and capacity optimized allocation strategy looks at both price
+ // and capacity to select the Spot Instance pools that are the least likely
+ // to be interrupted and have the lowest possible price.
SpotAllocationStrategy *string `type:"string"`
// The number of Spot Instance pools across which to allocate your Spot Instances.
@@ -17811,27 +18687,48 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string {
type RefreshPreferences struct {
_ struct{} `type:"structure"`
- // The amount of time, in seconds, to wait after a checkpoint before continuing.
- // This property is optional, but if you specify a value for it, you must also
- // specify a value for CheckpointPercentages. If you specify a value for CheckpointPercentages
- // and not for CheckpointDelay, the CheckpointDelay defaults to 3600 (1 hour).
+ // (Optional) Indicates whether to roll back the Auto Scaling group to its previous
+ // configuration if the instance refresh fails. The default is false.
+ //
+ // A rollback is not supported in the following situations:
+ //
+ // * There is no desired configuration specified for the instance refresh.
+ //
+ // * The Auto Scaling group has a launch template that uses an Amazon Web
+ // Services Systems Manager parameter instead of an AMI ID for the ImageId
+ // property.
+ //
+ // * The Auto Scaling group uses the launch template's $Latest or $Default
+ // version.
+ AutoRollback *bool `type:"boolean"`
+
+ // (Optional) The amount of time, in seconds, to wait after a checkpoint before
+ // continuing. This property is optional, but if you specify a value for it,
+ // you must also specify a value for CheckpointPercentages. If you specify a
+ // value for CheckpointPercentages and not for CheckpointDelay, the CheckpointDelay
+ // defaults to 3600 (1 hour).
CheckpointDelay *int64 `type:"integer"`
- // Threshold values for each checkpoint in ascending order. Each number must
- // be unique. To replace all instances in the Auto Scaling group, the last number
- // in the array must be 100.
+ // (Optional) Threshold values for each checkpoint in ascending order. Each
+ // number must be unique. To replace all instances in the Auto Scaling group,
+ // the last number in the array must be 100.
//
// For usage examples, see Adding checkpoints to an instance refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-adding-checkpoints-instance-refresh.html)
// in the Amazon EC2 Auto Scaling User Guide.
CheckpointPercentages []*int64 `type:"list"`
- // Not needed if the default instance warmup is defined for the group.
+ // A time period, in seconds, during which an instance refresh waits before
+ // moving on to replacing the next instance after a new instance enters the
+ // InService state.
//
- // The duration of the instance warmup, in seconds.
+ // This property is not required for normal usage. Instead, use the DefaultInstanceWarmup
+ // property of the Auto Scaling group. The InstanceWarmup and DefaultInstanceWarmup
+ // properties work the same way. Only specify this property if you must override
+ // the DefaultInstanceWarmup property.
//
- // The default is to use the value for the default instance warmup defined for
- // the group. If default instance warmup is null, then InstanceWarmup falls
- // back to the value of the health check grace period.
+ // If you do not specify this property, the instance warmup by default is the
+ // value of the DefaultInstanceWarmup property, if defined (which is recommended
+ // in all cases), or the HealthCheckGracePeriod property otherwise.
InstanceWarmup *int64 `type:"integer"`
// The amount of capacity in the Auto Scaling group that must pass your group's
@@ -17844,12 +18741,57 @@ type RefreshPreferences struct {
// has the effect of replacing all instances at the same time.
MinHealthyPercentage *int64 `type:"integer"`
- // A boolean value that indicates whether skip matching is enabled. If true,
+ // Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances
+ // protected from scale in are found.
+ //
+ // The following lists the valid values:
+ //
+ // Refresh
+ //
+ // Amazon EC2 Auto Scaling replaces instances that are protected from scale
+ // in.
+ //
+ // Ignore
+ //
+ // Amazon EC2 Auto Scaling ignores instances that are protected from scale in
+ // and continues to replace instances that are not protected.
+ //
+ // Wait (default)
+ //
+ // Amazon EC2 Auto Scaling waits one hour for you to remove scale-in protection.
+ // Otherwise, the instance refresh will fail.
+ ScaleInProtectedInstances *string `type:"string" enum:"ScaleInProtectedInstances"`
+
+ // (Optional) Indicates whether skip matching is enabled. If enabled (true),
// then Amazon EC2 Auto Scaling skips replacing instances that match the desired
// configuration. If no desired configuration is specified, then it skips replacing
- // instances that have the same configuration that is already set on the group.
- // The default is false.
+ // instances that have the same launch template and instance types that the
+ // Auto Scaling group was using before the start of the instance refresh. The
+ // default is false.
+ //
+ // For more information, see Use an instance refresh with skip matching (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh-skip-matching.html)
+ // in the Amazon EC2 Auto Scaling User Guide.
SkipMatching *bool `type:"boolean"`
+
+ // Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances
+ // in Standby state are found.
+ //
+ // The following lists the valid values:
+ //
+ // Terminate
+ //
+ // Amazon EC2 Auto Scaling terminates instances that are in Standby.
+ //
+ // Ignore
+ //
+ // Amazon EC2 Auto Scaling ignores instances that are in Standby and continues
+ // to replace instances that are in the InService state.
+ //
+ // Wait (default)
+ //
+ // Amazon EC2 Auto Scaling waits one hour for you to return the instances to
+ // service. Otherwise, the instance refresh will fail.
+ StandbyInstances *string `type:"string" enum:"StandbyInstances"`
}
// String returns the string representation.
@@ -17870,6 +18812,12 @@ func (s RefreshPreferences) GoString() string {
return s.String()
}
+// SetAutoRollback sets the AutoRollback field's value.
+func (s *RefreshPreferences) SetAutoRollback(v bool) *RefreshPreferences {
+ s.AutoRollback = &v
+ return s
+}
+
// SetCheckpointDelay sets the CheckpointDelay field's value.
func (s *RefreshPreferences) SetCheckpointDelay(v int64) *RefreshPreferences {
s.CheckpointDelay = &v
@@ -17894,12 +18842,24 @@ func (s *RefreshPreferences) SetMinHealthyPercentage(v int64) *RefreshPreference
return s
}
+// SetScaleInProtectedInstances sets the ScaleInProtectedInstances field's value.
+func (s *RefreshPreferences) SetScaleInProtectedInstances(v string) *RefreshPreferences {
+ s.ScaleInProtectedInstances = &v
+ return s
+}
+
// SetSkipMatching sets the SkipMatching field's value.
func (s *RefreshPreferences) SetSkipMatching(v bool) *RefreshPreferences {
s.SkipMatching = &v
return s
}
+// SetStandbyInstances sets the StandbyInstances field's value.
+func (s *RefreshPreferences) SetStandbyInstances(v string) *RefreshPreferences {
+ s.StandbyInstances = &v
+ return s
+}
+
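+// Preferences sketch (not generated SDK code): the values below show how the
+// new AutoRollback, ScaleInProtectedInstances, and StandbyInstances knobs sit
+// alongside the existing preferences. The string values are the documented
+// valid values; the numeric values are placeholders.
+//
+//	func buildRefreshPreferences() *autoscaling.RefreshPreferences {
+//		return &autoscaling.RefreshPreferences{
+//			AutoRollback:         aws.Bool(true),
+//			InstanceWarmup:       aws.Int64(60),
+//			MinHealthyPercentage: aws.Int64(90),
+//			// Replace protected instances instead of waiting one hour for
+//			// scale-in protection to be removed.
+//			ScaleInProtectedInstances: aws.String("Refresh"),
+//			// Skip Standby instances rather than failing the refresh.
+//			StandbyInstances: aws.String("Ignore"),
+//			SkipMatching:     aws.Bool(true),
+//		}
+//	}
+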
type ResumeProcessesOutput struct {
_ struct{} `type:"structure"`
}
@@ -17922,6 +18882,153 @@ func (s ResumeProcessesOutput) GoString() string {
return s.String()
}
+// Details about an instance refresh rollback.
+type RollbackDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the value of InstancesToUpdate at the time the rollback started.
+ InstancesToUpdateOnRollback *int64 `type:"integer"`
+
+ // Indicates the value of PercentageComplete at the time the rollback started.
+ PercentageCompleteOnRollback *int64 `type:"integer"`
+
+ // Reports progress on replacing instances in an Auto Scaling group that has
+ // a warm pool. This includes separate details for instances in the warm pool
+ // and instances in the Auto Scaling group (the live pool).
+ ProgressDetailsOnRollback *InstanceRefreshProgressDetails `type:"structure"`
+
+ // The reason for this instance refresh rollback (for example, whether a manual
+ // or automatic rollback was initiated).
+ RollbackReason *string `min:"1" type:"string"`
+
+ // The date and time at which the rollback began.
+ RollbackStartTime *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackDetails) GoString() string {
+ return s.String()
+}
+
+// SetInstancesToUpdateOnRollback sets the InstancesToUpdateOnRollback field's value.
+func (s *RollbackDetails) SetInstancesToUpdateOnRollback(v int64) *RollbackDetails {
+ s.InstancesToUpdateOnRollback = &v
+ return s
+}
+
+// SetPercentageCompleteOnRollback sets the PercentageCompleteOnRollback field's value.
+func (s *RollbackDetails) SetPercentageCompleteOnRollback(v int64) *RollbackDetails {
+ s.PercentageCompleteOnRollback = &v
+ return s
+}
+
+// SetProgressDetailsOnRollback sets the ProgressDetailsOnRollback field's value.
+func (s *RollbackDetails) SetProgressDetailsOnRollback(v *InstanceRefreshProgressDetails) *RollbackDetails {
+ s.ProgressDetailsOnRollback = v
+ return s
+}
+
+// SetRollbackReason sets the RollbackReason field's value.
+func (s *RollbackDetails) SetRollbackReason(v string) *RollbackDetails {
+ s.RollbackReason = &v
+ return s
+}
+
+// SetRollbackStartTime sets the RollbackStartTime field's value.
+func (s *RollbackDetails) SetRollbackStartTime(v time.Time) *RollbackDetails {
+ s.RollbackStartTime = &v
+ return s
+}
+
+type RollbackInstanceRefreshInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the Auto Scaling group.
+ AutoScalingGroupName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackInstanceRefreshInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackInstanceRefreshInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RollbackInstanceRefreshInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RollbackInstanceRefreshInput"}
+ if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAutoScalingGroupName sets the AutoScalingGroupName field's value.
+func (s *RollbackInstanceRefreshInput) SetAutoScalingGroupName(v string) *RollbackInstanceRefreshInput {
+ s.AutoScalingGroupName = &v
+ return s
+}
+
+type RollbackInstanceRefreshOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance refresh ID associated with the request. This is the unique ID
+ // assigned to the instance refresh when it was started.
+ InstanceRefreshId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackInstanceRefreshOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RollbackInstanceRefreshOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceRefreshId sets the InstanceRefreshId field's value.
+func (s *RollbackInstanceRefreshOutput) SetInstanceRefreshId(v string) *RollbackInstanceRefreshOutput {
+ s.InstanceRefreshId = &v
+ return s
+}
+
// Describes a scaling policy.
type ScalingPolicy struct {
_ struct{} `type:"structure"`
@@ -18777,21 +19884,25 @@ type StartInstanceRefreshInput struct {
// When you specify a new launch template or a new version of the current launch
// template for your desired configuration, consider enabling the SkipMatching
// property in preferences. If it's enabled, Amazon EC2 Auto Scaling skips replacing
- // instances that already use the specified launch template and version. This
- // can help you reduce the number of replacements that are required to apply
- // updates.
+ // instances that already use the specified launch template and instance types.
+ // This can help you reduce the number of replacements that are required to
+ // apply updates.
DesiredConfiguration *DesiredConfiguration `type:"structure"`
- // Set of preferences associated with the instance refresh request. If not provided,
- // the default values are used.
+ // Sets your preferences for the instance refresh so that it performs as expected
+ // when you start it. Includes the instance warmup time, the minimum healthy
+ // percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use
+ // if instances that are in Standby state or protected from scale in are found.
+ // You can also choose to enable additional features, such as the following:
+ //
+ // * Auto rollback
+ //
+ // * Checkpoints
+ //
+ // * Skip matching
Preferences *RefreshPreferences `type:"structure"`
// The strategy to use for the instance refresh. The only valid value is Rolling.
- //
- // A rolling update helps you update your instances gradually. A rolling update
- // can fail due to failed health checks or if instances are on standby or are
- // protected from scale in. If the rolling update process fails, any instances
- // that are replaced are not rolled back to their previous configuration.
Strategy *string `type:"string" enum:"RefreshStrategy"`
}
@@ -18861,7 +19972,7 @@ func (s *StartInstanceRefreshInput) SetStrategy(v string) *StartInstanceRefreshI
type StartInstanceRefreshOutput struct {
_ struct{} `type:"structure"`
- // A unique ID for tracking the progress of the request.
+ // A unique ID for tracking the progress of the instance refresh.
InstanceRefreshId *string `min:"1" type:"string"`
}
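+
+// End-to-end sketch (not generated SDK code): starting a refresh with a desired
+// configuration and the preferences built in the earlier RefreshPreferences
+// sketch, then looking up its status via DescribeInstanceRefreshes. The launch
+// template name and version are placeholders.
+//
+//	func startAndCheckRefresh(svc *autoscaling.AutoScaling, prefs *autoscaling.RefreshPreferences) error {
+//		start, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			DesiredConfiguration: &autoscaling.DesiredConfiguration{
+//				LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
+//					LaunchTemplateName: aws.String("my-template"),
+//					Version:            aws.String("2"),
+//				},
+//			},
+//			Preferences: prefs,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		// Look the refresh up by the returned ID to see its Status,
+//		// StatusReason, and PercentageComplete.
+//		desc, err := svc.DescribeInstanceRefreshes(&autoscaling.DescribeInstanceRefreshesInput{
+//			AutoScalingGroupName: aws.String("my-asg"),
+//			InstanceRefreshIds:   []*string{start.InstanceRefreshId},
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		for _, r := range desc.InstanceRefreshes {
+//			fmt.Println(aws.StringValue(r.Status), aws.StringValue(r.StatusReason))
+//		}
+//		return nil
+//	}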
@@ -19319,6 +20430,215 @@ func (s *TargetTrackingConfiguration) SetTargetValue(v float64) *TargetTrackingC
return s
}
+// The metric data to return. Also defines whether this call is returning data
+// for one metric only, or whether it is performing a math expression on the
+// values of returned metric statistics to create a new time series. A time
+// series is a series of data points, each of which is associated with a timestamp.
+type TargetTrackingMetricDataQuery struct {
+ _ struct{} `type:"structure"`
+
+ // The math expression to perform on the returned data, if this object is performing
+ // a math expression. This expression can use the Id of the other metrics to
+ // refer to those metrics, and can also use the Id of other expressions to use
+ // the result of those expressions.
+ //
+ // Conditional: Within each TargetTrackingMetricDataQuery object, you must specify
+ // either Expression or MetricStat, but not both.
+ Expression *string `min:"1" type:"string"`
+
+ // A short name that identifies the object's results in the response. This name
+ // must be unique among all TargetTrackingMetricDataQuery objects specified
+ // for a single scaling policy. If you are performing math expressions on this
+ // set of data, this name represents that data and can serve as a variable in
+ // the mathematical expression. The valid characters are letters, numbers, and
+ // underscores. The first character must be a lowercase letter.
+ //
+ // Id is a required field
+ Id *string `min:"1" type:"string" required:"true"`
+
+ // A human-readable label for this metric or expression. This is especially
+ // useful if this is a math expression, so that you know what the value represents.
+ Label *string `type:"string"`
+
+ // Information about the metric data to return.
+ //
+ // Conditional: Within each TargetTrackingMetricDataQuery object, you must specify
+ // either Expression or MetricStat, but not both.
+ MetricStat *TargetTrackingMetricStat `type:"structure"`
+
+ // Indicates whether to return the timestamps and raw data values of this metric.
+ //
+ // If you use any math expressions, specify true for this value for only the
+ // final math expression that the metric specification is based on. You must
+ // specify false for ReturnData for all the other metrics and expressions used
+ // in the metric specification.
+ //
+ // If you are only retrieving metrics and not performing any math expressions,
+ // do not specify anything for ReturnData. This sets it to its default (true).
+ ReturnData *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetTrackingMetricDataQuery) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetTrackingMetricDataQuery) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetTrackingMetricDataQuery) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetTrackingMetricDataQuery"}
+ if s.Expression != nil && len(*s.Expression) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Expression", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Id != nil && len(*s.Id) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Id", 1))
+ }
+ if s.MetricStat != nil {
+ if err := s.MetricStat.Validate(); err != nil {
+ invalidParams.AddNested("MetricStat", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetExpression sets the Expression field's value.
+func (s *TargetTrackingMetricDataQuery) SetExpression(v string) *TargetTrackingMetricDataQuery {
+ s.Expression = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TargetTrackingMetricDataQuery) SetId(v string) *TargetTrackingMetricDataQuery {
+ s.Id = &v
+ return s
+}
+
+// SetLabel sets the Label field's value.
+func (s *TargetTrackingMetricDataQuery) SetLabel(v string) *TargetTrackingMetricDataQuery {
+ s.Label = &v
+ return s
+}
+
+// SetMetricStat sets the MetricStat field's value.
+func (s *TargetTrackingMetricDataQuery) SetMetricStat(v *TargetTrackingMetricStat) *TargetTrackingMetricDataQuery {
+ s.MetricStat = v
+ return s
+}
+
+// SetReturnData sets the ReturnData field's value.
+func (s *TargetTrackingMetricDataQuery) SetReturnData(v bool) *TargetTrackingMetricDataQuery {
+ s.ReturnData = &v
+ return s
+}
+
+// This structure defines the CloudWatch metric to return, along with the statistic
+// and unit.
+//
+// For more information about the CloudWatch terminology below, see Amazon CloudWatch
+// concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html)
+// in the Amazon CloudWatch User Guide.
+type TargetTrackingMetricStat struct {
+ _ struct{} `type:"structure"`
+
+ // The metric to use.
+ //
+ // Metric is a required field
+ Metric *Metric `type:"structure" required:"true"`
+
+ // The statistic to return. It can include any CloudWatch statistic or extended
+ // statistic. For a list of valid values, see the table in Statistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Statistic)
+ // in the Amazon CloudWatch User Guide.
+ //
+ // The most commonly used metric for scaling is Average.
+ //
+ // Stat is a required field
+ Stat *string `min:"1" type:"string" required:"true"`
+
+ // The unit to use for the returned data points. For a complete list of the
+ // units that CloudWatch supports, see the MetricDatum (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
+ // data type in the Amazon CloudWatch API Reference.
+ Unit *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetTrackingMetricStat) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetTrackingMetricStat) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetTrackingMetricStat) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetTrackingMetricStat"}
+ if s.Metric == nil {
+ invalidParams.Add(request.NewErrParamRequired("Metric"))
+ }
+ if s.Stat == nil {
+ invalidParams.Add(request.NewErrParamRequired("Stat"))
+ }
+ if s.Stat != nil && len(*s.Stat) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Stat", 1))
+ }
+ if s.Metric != nil {
+ if err := s.Metric.Validate(); err != nil {
+ invalidParams.AddNested("Metric", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMetric sets the Metric field's value.
+func (s *TargetTrackingMetricStat) SetMetric(v *Metric) *TargetTrackingMetricStat {
+ s.Metric = v
+ return s
+}
+
+// SetStat sets the Stat field's value.
+func (s *TargetTrackingMetricStat) SetStat(v string) *TargetTrackingMetricStat {
+ s.Stat = &v
+ return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *TargetTrackingMetricStat) SetUnit(v string) *TargetTrackingMetricStat {
+ s.Unit = &v
+ return s
+}
+
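The two structures above compose into CloudWatch metric-math queries for target tracking. The following is a minimal sketch of how they fit together; it is not part of this vendored diff, and the Namespace/MetricName setters on the pre-existing generated Metric type (plus the canonical github.com/aws/aws-sdk-go import path) are assumptions here.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Raw CPU metric; ReturnData is false because only the final expression
	// should feed the scaling policy.
	cpu := (&autoscaling.TargetTrackingMetricDataQuery{}).
		SetId("cpu").
		SetMetricStat((&autoscaling.TargetTrackingMetricStat{}).
			SetMetric((&autoscaling.Metric{}).
				SetNamespace("AWS/EC2").          // assumed setter on the existing Metric type
				SetMetricName("CPUUtilization")). // assumed setter on the existing Metric type
			SetStat("Average")).
		SetReturnData(false)

	// Final math expression; ReturnData is left unset, which defaults to true.
	scaled := (&autoscaling.TargetTrackingMetricDataQuery{}).
		SetId("scaled_cpu").
		SetExpression("cpu * 2").
		SetLabel("CPU utilization, doubled")

	for _, q := range []*autoscaling.TargetTrackingMetricDataQuery{cpu, scaled} {
		if err := q.Validate(); err != nil {
			fmt.Println("invalid query:", err)
		}
	}
}
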
type TerminateInstanceInAutoScalingGroupInput struct {
_ struct{} `type:"structure"`
@@ -19456,6 +20776,195 @@ func (s *TotalLocalStorageGBRequest) SetMin(v float64) *TotalLocalStorageGBReque
return s
}
+// Identifying information for a traffic source.
+type TrafficSourceIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Identifies the traffic source.
+ //
+ // For Application Load Balancers, Gateway Load Balancers, Network Load Balancers,
+ // and VPC Lattice, this will be the Amazon Resource Name (ARN) for a target
+ // group in this account and Region. For Classic Load Balancers, this will be
+ // the name of the Classic Load Balancer in this account and Region.
+ //
+ // For example:
+ //
+ // * Application Load Balancer ARN: arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/1234567890123456
+ //
+ // * Classic Load Balancer name: my-classic-load-balancer
+ //
+ // * VPC Lattice ARN: arn:aws:vpc-lattice:us-west-2:123456789012:targetgroup/tg-1234567890123456
+ //
+ // To get the ARN of a target group for an Application Load Balancer, Gateway
+ // Load Balancer, or Network Load Balancer, or the name of a Classic Load Balancer,
+ // use the Elastic Load Balancing DescribeTargetGroups (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html)
+ // and DescribeLoadBalancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html)
+ // API operations.
+ //
+ // To get the ARN of a target group for VPC Lattice, use the VPC Lattice GetTargetGroup
+ // (https://docs.aws.amazon.com/vpc-lattice/latest/APIReference/API_GetTargetGroup.html)
+ // API operation.
+ //
+ // Identifier is a required field
+ Identifier *string `min:"1" type:"string" required:"true"`
+
+ // Provides additional context for the value of Identifier.
+ //
+ // The following lists the valid values:
+ //
+ // * elb if Identifier is the name of a Classic Load Balancer.
+ //
+ // * elbv2 if Identifier is the ARN of an Application Load Balancer, Gateway
+ // Load Balancer, or Network Load Balancer target group.
+ //
+ // * vpc-lattice if Identifier is the ARN of a VPC Lattice target group.
+ //
+ // Required if the identifier is the name of a Classic Load Balancer.
+ Type *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TrafficSourceIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TrafficSourceIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TrafficSourceIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TrafficSourceIdentifier"}
+ if s.Identifier == nil {
+ invalidParams.Add(request.NewErrParamRequired("Identifier"))
+ }
+ if s.Identifier != nil && len(*s.Identifier) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Identifier", 1))
+ }
+ if s.Type != nil && len(*s.Type) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Type", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIdentifier sets the Identifier field's value.
+func (s *TrafficSourceIdentifier) SetIdentifier(v string) *TrafficSourceIdentifier {
+ s.Identifier = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *TrafficSourceIdentifier) SetType(v string) *TrafficSourceIdentifier {
+ s.Type = &v
+ return s
+}
+
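A short illustrative sketch (not part of this diff) of building and validating traffic source identifiers with the setters above, reusing the example values from the field documentation:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sources := []*autoscaling.TrafficSourceIdentifier{
		// A target group attached to an Application Load Balancer, identified by ARN.
		(&autoscaling.TrafficSourceIdentifier{}).
			SetIdentifier("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/1234567890123456").
			SetType("elbv2"),
		// A Classic Load Balancer, identified by name; Type is required in this case.
		(&autoscaling.TrafficSourceIdentifier{}).
			SetIdentifier("my-classic-load-balancer").
			SetType("elb"),
	}
	for _, ts := range sources {
		if err := ts.Validate(); err != nil {
			fmt.Println("invalid traffic source:", err)
		}
	}
}
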
+// Describes the state of a traffic source.
+type TrafficSourceState struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of the traffic source.
+ Identifier *string `min:"1" type:"string"`
+
+ // Describes the current state of a traffic source.
+ //
+ // The state values are as follows:
+ //
+ // * Adding - The Auto Scaling instances are being registered with the load
+ // balancer or target group.
+ //
+ // * Added - All Auto Scaling instances are registered with the load balancer
+ // or target group.
+ //
+ // * InService - For an Elastic Load Balancing load balancer or target group,
+ // at least one Auto Scaling instance passed an ELB health check. For VPC
+ // Lattice, at least one Auto Scaling instance passed a VPC_LATTICE health
+ // check.
+ //
+ // * Removing - The Auto Scaling instances are being deregistered from the
+ // load balancer or target group. If connection draining (deregistration
+ // delay) is enabled, Elastic Load Balancing or VPC Lattice waits for in-flight
+ // requests to complete before deregistering the instances.
+ //
+ // * Removed - All Auto Scaling instances are deregistered from the load
+ // balancer or target group.
+ State *string `min:"1" type:"string"`
+
+ // This is replaced by Identifier.
+ //
+ // Deprecated: TrafficSource has been replaced by Identifier
+ TrafficSource *string `min:"1" deprecated:"true" type:"string"`
+
+ // Provides additional context for the value of Identifier.
+ //
+ // The following lists the valid values:
+ //
+ // * elb if Identifier is the name of a Classic Load Balancer.
+ //
+ // * elbv2 if Identifier is the ARN of an Application Load Balancer, Gateway
+ // Load Balancer, or Network Load Balancer target group.
+ //
+ // * vpc-lattice if Identifier is the ARN of a VPC Lattice target group.
+ //
+ // Required if the identifier is the name of a Classic Load Balancer.
+ Type *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TrafficSourceState) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TrafficSourceState) GoString() string {
+ return s.String()
+}
+
+// SetIdentifier sets the Identifier field's value.
+func (s *TrafficSourceState) SetIdentifier(v string) *TrafficSourceState {
+ s.Identifier = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *TrafficSourceState) SetState(v string) *TrafficSourceState {
+ s.State = &v
+ return s
+}
+
+// SetTrafficSource sets the TrafficSource field's value.
+func (s *TrafficSourceState) SetTrafficSource(v string) *TrafficSourceState {
+ s.TrafficSource = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *TrafficSourceState) SetType(v string) *TrafficSourceState {
+ s.Type = &v
+ return s
+}
+
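The State values listed above are plain strings, so callers typically switch on them. Below is a minimal sketch (not part of this diff) that groups the documented values into coarse phases; how the states are obtained (for example, from a describe call) is outside this excerpt:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// phase maps the documented State values to a coarse lifecycle phase.
func phase(s *autoscaling.TrafficSourceState) string {
	if s == nil || s.State == nil {
		return "unknown"
	}
	switch *s.State {
	case "Adding", "Removing":
		return "transitioning"
	case "Added", "InService":
		return "attached"
	case "Removed":
		return "detached"
	default:
		return "unknown"
	}
}

func main() {
	ts := (&autoscaling.TrafficSourceState{}).
		SetIdentifier("my-classic-load-balancer").
		SetType("elb").
		SetState("InService")
	fmt.Println(phase(ts)) // attached
}
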
type UpdateAutoScalingGroupInput struct {
_ struct{} `type:"structure"`
@@ -19483,23 +20992,24 @@ type UpdateAutoScalingGroupInput struct {
// in the Amazon EC2 Auto Scaling User Guide.
DefaultCooldown *int64 `type:"integer"`
- // The amount of time, in seconds, until a newly launched instance can contribute
- // to the Amazon CloudWatch metrics. This delay lets an instance finish initializing
- // before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in
- // more reliable usage data. Set this value equal to the amount of time that
- // it takes for resource consumption to become stable after an instance reaches
- // the InService state. For more information, see Set the default instance warmup
- // for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html)
+ // The amount of time, in seconds, until a new instance is considered to have
+ // finished initializing and resource consumption to become stable after it
+ // enters the InService state.
+ //
+ // During an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up
+ // period after it replaces an instance before it moves on to replacing the
+ // next instance. Amazon EC2 Auto Scaling also waits for the warm-up period
+ // before aggregating the metrics for new instances with existing instances
+ // in the Amazon CloudWatch metrics that are used for scaling, resulting in
+ // more reliable usage data. For more information, see Set the default instance
+ // warmup for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
- // To manage your warm-up settings at the group level, we recommend that you
- // set the default instance warmup, even if its value is set to 0 seconds. This
- // also optimizes the performance of scaling policies that scale continuously,
- // such as target tracking and step scaling policies.
- //
- // If you need to remove a value that you previously set, include the property
- // but specify -1 for the value. However, we strongly recommend keeping the
- // default instance warmup enabled by specifying a minimum value of 0.
+ // To manage various warm-up settings at the group level, we recommend that
+ // you set the default instance warmup, even if it is set to 0 seconds. To remove
+ // a value that you previously set, include the property but specify -1 for
+ // the value. However, we strongly recommend keeping the default instance warmup
+ // enabled by specifying a value of 0 or another nominal value.
DefaultInstanceWarmup *int64 `type:"integer"`
// The desired capacity is the initial capacity of the Auto Scaling group after
@@ -19522,17 +21032,21 @@ type UpdateAutoScalingGroupInput struct {
// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before
// checking the health status of an EC2 instance that has come into service
- // and marking it unhealthy due to a failed Elastic Load Balancing or custom
- // health check. This is useful if your instances do not immediately pass these
- // health checks after they enter the InService state. For more information,
- // see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
+ // and marking it unhealthy due to a failed health check. This is useful if
+ // your instances do not immediately pass their health checks after they enter
+ // the InService state. For more information, see Set the health check grace
+ // period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
// in the Amazon EC2 Auto Scaling User Guide.
HealthCheckGracePeriod *int64 `type:"integer"`
- // The service to use for the health checks. The valid values are EC2 and ELB.
- // If you configure an Auto Scaling group to use ELB health checks, it considers
- // the instance unhealthy if it fails either the EC2 status checks or the load
- // balancer health checks.
+ // A comma-separated value string of one or more health check types.
+ //
+ // The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health
+ // check and cannot be disabled. For more information, see Health checks for
+ // Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
+ // in the Amazon EC2 Auto Scaling User Guide.
+ //
+ // Only specify EC2 if you must clear a value that was previously set.
HealthCheckType *string `min:"1" type:"string"`
// The name of the launch configuration. If you specify LaunchConfigurationName
@@ -20149,6 +21663,15 @@ const (
// InstanceRefreshStatusCancelled is a InstanceRefreshStatus enum value
InstanceRefreshStatusCancelled = "Cancelled"
+
+ // InstanceRefreshStatusRollbackInProgress is a InstanceRefreshStatus enum value
+ InstanceRefreshStatusRollbackInProgress = "RollbackInProgress"
+
+ // InstanceRefreshStatusRollbackFailed is a InstanceRefreshStatus enum value
+ InstanceRefreshStatusRollbackFailed = "RollbackFailed"
+
+ // InstanceRefreshStatusRollbackSuccessful is a InstanceRefreshStatus enum value
+ InstanceRefreshStatusRollbackSuccessful = "RollbackSuccessful"
)
// InstanceRefreshStatus_Values returns all elements of the InstanceRefreshStatus enum
@@ -20160,6 +21683,9 @@ func InstanceRefreshStatus_Values() []string {
InstanceRefreshStatusFailed,
InstanceRefreshStatusCancelling,
InstanceRefreshStatusCancelled,
+ InstanceRefreshStatusRollbackInProgress,
+ InstanceRefreshStatusRollbackFailed,
+ InstanceRefreshStatusRollbackSuccessful,
}
}
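A small sketch (not part of this diff) showing how a caller might use the new rollback statuses, for example when polling instance refresh results; only constants defined in this file are used:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// isRollbackStatus reports whether an instance refresh status is one of the
// rollback states added above.
func isRollbackStatus(status string) bool {
	switch status {
	case autoscaling.InstanceRefreshStatusRollbackInProgress,
		autoscaling.InstanceRefreshStatusRollbackFailed,
		autoscaling.InstanceRefreshStatusRollbackSuccessful:
		return true
	}
	return false
}

func main() {
	fmt.Println(isRollbackStatus(autoscaling.InstanceRefreshStatusCancelled))          // false
	fmt.Println(isRollbackStatus(autoscaling.InstanceRefreshStatusRollbackInProgress)) // true
}
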
@@ -20467,6 +21993,26 @@ func RefreshStrategy_Values() []string {
}
}
+const (
+ // ScaleInProtectedInstancesRefresh is a ScaleInProtectedInstances enum value
+ ScaleInProtectedInstancesRefresh = "Refresh"
+
+ // ScaleInProtectedInstancesIgnore is a ScaleInProtectedInstances enum value
+ ScaleInProtectedInstancesIgnore = "Ignore"
+
+ // ScaleInProtectedInstancesWait is a ScaleInProtectedInstances enum value
+ ScaleInProtectedInstancesWait = "Wait"
+)
+
+// ScaleInProtectedInstances_Values returns all elements of the ScaleInProtectedInstances enum
+func ScaleInProtectedInstances_Values() []string {
+ return []string{
+ ScaleInProtectedInstancesRefresh,
+ ScaleInProtectedInstancesIgnore,
+ ScaleInProtectedInstancesWait,
+ }
+}
+
const (
// ScalingActivityStatusCodePendingSpotBidPlacement is a ScalingActivityStatusCode enum value
ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement"
@@ -20503,6 +22049,9 @@ const (
// ScalingActivityStatusCodeCancelled is a ScalingActivityStatusCode enum value
ScalingActivityStatusCodeCancelled = "Cancelled"
+
+ // ScalingActivityStatusCodeWaitingForConnectionDraining is a ScalingActivityStatusCode enum value
+ ScalingActivityStatusCodeWaitingForConnectionDraining = "WaitingForConnectionDraining"
)
// ScalingActivityStatusCode_Values returns all elements of the ScalingActivityStatusCode enum
@@ -20520,6 +22069,27 @@ func ScalingActivityStatusCode_Values() []string {
ScalingActivityStatusCodeSuccessful,
ScalingActivityStatusCodeFailed,
ScalingActivityStatusCodeCancelled,
+ ScalingActivityStatusCodeWaitingForConnectionDraining,
+ }
+}
+
+const (
+ // StandbyInstancesTerminate is a StandbyInstances enum value
+ StandbyInstancesTerminate = "Terminate"
+
+ // StandbyInstancesIgnore is a StandbyInstances enum value
+ StandbyInstancesIgnore = "Ignore"
+
+ // StandbyInstancesWait is a StandbyInstances enum value
+ StandbyInstancesWait = "Wait"
+)
+
+// StandbyInstances_Values returns all elements of the StandbyInstances enum
+func StandbyInstances_Values() []string {
+ return []string{
+ StandbyInstancesTerminate,
+ StandbyInstancesIgnore,
+ StandbyInstancesWait,
}
}
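These enums feed the instance refresh preferences; the corresponding request fields are not part of this excerpt, so the sketch below (not part of this diff) only checks a value against the generated _Values helpers:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// isOneOf reports whether v is a member of an enum's _Values slice.
func isOneOf(v string, values []string) bool {
	for _, allowed := range values {
		if v == allowed {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isOneOf("Refresh", autoscaling.ScaleInProtectedInstances_Values())) // true
	fmt.Println(isOneOf("Drain", autoscaling.StandbyInstances_Values()))            // false
}
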
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go
index 85e907df74fe..dc216126a58e 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go
@@ -7,8 +7,8 @@ const (
// ErrCodeActiveInstanceRefreshNotFoundFault for service response error code
// "ActiveInstanceRefreshNotFound".
//
- // The request failed because an active instance refresh for the specified Auto
- // Scaling group was not found.
+ // The request failed because an active instance refresh or rollback for the
+ // specified Auto Scaling group was not found.
ErrCodeActiveInstanceRefreshNotFoundFault = "ActiveInstanceRefreshNotFound"
// ErrCodeAlreadyExistsFault for service response error code
@@ -21,8 +21,8 @@ const (
// ErrCodeInstanceRefreshInProgressFault for service response error code
// "InstanceRefreshInProgress".
//
- // The request failed because an active instance refresh operation already exists
- // for the specified Auto Scaling group.
+ // The request failed because an active instance refresh already exists for
+ // the specified Auto Scaling group.
ErrCodeInstanceRefreshInProgressFault = "InstanceRefreshInProgress"
// ErrCodeInvalidNextToken for service response error code
@@ -31,6 +31,15 @@ const (
// The NextToken value is not valid.
ErrCodeInvalidNextToken = "InvalidNextToken"
+ // ErrCodeIrreversibleInstanceRefreshFault for service response error code
+ // "IrreversibleInstanceRefresh".
+ //
+ // The request failed because a desired configuration was not found or an incompatible
+ // launch template (uses a Systems Manager parameter instead of an AMI ID) or
+ // launch template version ($Latest or $Default) is present on the Auto Scaling
+ // group.
+ ErrCodeIrreversibleInstanceRefreshFault = "IrreversibleInstanceRefresh"
+
// ErrCodeLimitExceededFault for service response error code
// "LimitExceeded".
//
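A brief sketch (not part of this diff) of detecting the new IrreversibleInstanceRefresh error code; the err value would come from an instance refresh rollback call, which is outside this excerpt, and the canonical github.com/aws/aws-sdk-go import paths are assumed:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// isIrreversibleRefresh reports whether err carries the IrreversibleInstanceRefresh
// service error code defined above.
func isIrreversibleRefresh(err error) bool {
	var aerr awserr.Error
	if errors.As(err, &aerr) {
		return aerr.Code() == autoscaling.ErrCodeIrreversibleInstanceRefreshFault
	}
	return false
}

func main() {
	fmt.Println(isIrreversibleRefresh(nil)) // false
}
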
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
index 23be76a83ff7..1ed7b2921c27 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
@@ -429,8 +429,7 @@ func (c *EC2) AcceptVpcEndpointConnectionsRequest(input *AcceptVpcEndpointConnec
// AcceptVpcEndpointConnections API operation for Amazon Elastic Compute Cloud.
//
-// Accepts one or more interface VPC endpoint connection requests to your VPC
-// endpoint service.
+// Accepts connection requests to your VPC endpoint service.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -852,10 +851,13 @@ func (c *EC2) AllocateIpamPoolCidrRequest(input *AllocateIpamPoolCidrInput) (req
// AllocateIpamPoolCidr API operation for Amazon Elastic Compute Cloud.
//
// Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment
-// from an IPAM pool to another resource or IPAM pool. For more information,
+// from an IPAM pool to another IPAM pool or to a resource. For more information,
// see Allocate CIDRs (https://docs.aws.amazon.com/vpc/latest/ipam/allocate-cidrs-ipam.html)
// in the Amazon VPC IPAM User Guide.
//
+// This action creates an allocation with strong consistency. The returned CIDR
+// will not overlap with any other allocations from the same pool.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1146,6 +1148,81 @@ func (c *EC2) AssignPrivateIpAddressesWithContext(ctx aws.Context, input *Assign
return out, req.Send()
}
+const opAssignPrivateNatGatewayAddress = "AssignPrivateNatGatewayAddress"
+
+// AssignPrivateNatGatewayAddressRequest generates a "aws/request.Request" representing the
+// client's request for the AssignPrivateNatGatewayAddress operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssignPrivateNatGatewayAddress for more information on using the AssignPrivateNatGatewayAddress
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the AssignPrivateNatGatewayAddressRequest method.
+// req, resp := client.AssignPrivateNatGatewayAddressRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateNatGatewayAddress
+func (c *EC2) AssignPrivateNatGatewayAddressRequest(input *AssignPrivateNatGatewayAddressInput) (req *request.Request, output *AssignPrivateNatGatewayAddressOutput) {
+ op := &request.Operation{
+ Name: opAssignPrivateNatGatewayAddress,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssignPrivateNatGatewayAddressInput{}
+ }
+
+ output = &AssignPrivateNatGatewayAddressOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssignPrivateNatGatewayAddress API operation for Amazon Elastic Compute Cloud.
+//
+// Assigns one or more private IPv4 addresses to a private NAT gateway. For
+// more information, see Work with NAT gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-working-with)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssignPrivateNatGatewayAddress for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateNatGatewayAddress
+func (c *EC2) AssignPrivateNatGatewayAddress(input *AssignPrivateNatGatewayAddressInput) (*AssignPrivateNatGatewayAddressOutput, error) {
+ req, out := c.AssignPrivateNatGatewayAddressRequest(input)
+ return out, req.Send()
+}
+
+// AssignPrivateNatGatewayAddressWithContext is the same as AssignPrivateNatGatewayAddress with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssignPrivateNatGatewayAddress for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AssignPrivateNatGatewayAddressWithContext(ctx aws.Context, input *AssignPrivateNatGatewayAddressInput, opts ...request.Option) (*AssignPrivateNatGatewayAddressOutput, error) {
+ req, out := c.AssignPrivateNatGatewayAddressRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opAssociateAddress = "AssociateAddress"
// AssociateAddressRequest generates a "aws/request.Request" representing the
@@ -1665,6 +1742,161 @@ func (c *EC2) AssociateInstanceEventWindowWithContext(ctx aws.Context, input *As
return out, req.Send()
}
+const opAssociateIpamResourceDiscovery = "AssociateIpamResourceDiscovery"
+
+// AssociateIpamResourceDiscoveryRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateIpamResourceDiscovery operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssociateIpamResourceDiscovery for more information on using the AssociateIpamResourceDiscovery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the AssociateIpamResourceDiscoveryRequest method.
+// req, resp := client.AssociateIpamResourceDiscoveryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIpamResourceDiscovery
+func (c *EC2) AssociateIpamResourceDiscoveryRequest(input *AssociateIpamResourceDiscoveryInput) (req *request.Request, output *AssociateIpamResourceDiscoveryOutput) {
+ op := &request.Operation{
+ Name: opAssociateIpamResourceDiscovery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssociateIpamResourceDiscoveryInput{}
+ }
+
+ output = &AssociateIpamResourceDiscoveryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssociateIpamResourceDiscovery API operation for Amazon Elastic Compute Cloud.
+//
+// Associates an IPAM resource discovery with an Amazon VPC IPAM. A resource
+// discovery is an IPAM component that enables IPAM to manage and monitor resources
+// that belong to the owning account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssociateIpamResourceDiscovery for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIpamResourceDiscovery
+func (c *EC2) AssociateIpamResourceDiscovery(input *AssociateIpamResourceDiscoveryInput) (*AssociateIpamResourceDiscoveryOutput, error) {
+ req, out := c.AssociateIpamResourceDiscoveryRequest(input)
+ return out, req.Send()
+}
+
+// AssociateIpamResourceDiscoveryWithContext is the same as AssociateIpamResourceDiscovery with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateIpamResourceDiscovery for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AssociateIpamResourceDiscoveryWithContext(ctx aws.Context, input *AssociateIpamResourceDiscoveryInput, opts ...request.Option) (*AssociateIpamResourceDiscoveryOutput, error) {
+ req, out := c.AssociateIpamResourceDiscoveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAssociateNatGatewayAddress = "AssociateNatGatewayAddress"
+
+// AssociateNatGatewayAddressRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateNatGatewayAddress operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssociateNatGatewayAddress for more information on using the AssociateNatGatewayAddress
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the AssociateNatGatewayAddressRequest method.
+// req, resp := client.AssociateNatGatewayAddressRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateNatGatewayAddress
+func (c *EC2) AssociateNatGatewayAddressRequest(input *AssociateNatGatewayAddressInput) (req *request.Request, output *AssociateNatGatewayAddressOutput) {
+ op := &request.Operation{
+ Name: opAssociateNatGatewayAddress,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssociateNatGatewayAddressInput{}
+ }
+
+ output = &AssociateNatGatewayAddressOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssociateNatGatewayAddress API operation for Amazon Elastic Compute Cloud.
+//
+// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a
+// public NAT gateway. For more information, see Work with NAT gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-working-with)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// By default, you can associate up to 2 Elastic IP addresses per public NAT
+// gateway. You can increase the limit by requesting a quota adjustment. For
+// more information, see Elastic IP address quotas (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-eips)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssociateNatGatewayAddress for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateNatGatewayAddress
+func (c *EC2) AssociateNatGatewayAddress(input *AssociateNatGatewayAddressInput) (*AssociateNatGatewayAddressOutput, error) {
+ req, out := c.AssociateNatGatewayAddressRequest(input)
+ return out, req.Send()
+}
+
+// AssociateNatGatewayAddressWithContext is the same as AssociateNatGatewayAddress with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateNatGatewayAddress for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AssociateNatGatewayAddressWithContext(ctx aws.Context, input *AssociateNatGatewayAddressInput, opts ...request.Option) (*AssociateNatGatewayAddressOutput, error) {
+ req, out := c.AssociateNatGatewayAddressRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
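A minimal usage sketch for the new NAT gateway association call (not part of this diff); the client setup and the input field names (NatGatewayId, AllocationIds) are assumptions taken from the service model rather than from the excerpt above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.AssociateNatGatewayAddressWithContext(aws.BackgroundContext(),
		&ec2.AssociateNatGatewayAddressInput{
			// Field names are assumed; check the generated input type before use.
			NatGatewayId:  aws.String("nat-0123456789abcdef0"),
			AllocationIds: aws.StringSlice([]string{"eipalloc-0123456789abcdef0"}),
		})
	if err != nil {
		fmt.Println("associate failed:", err)
		return
	}
	fmt.Println(out)
}
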
const opAssociateRouteTable = "AssociateRouteTable"
// AssociateRouteTableRequest generates a "aws/request.Request" representing the
@@ -2450,6 +2682,81 @@ func (c *EC2) AttachNetworkInterfaceWithContext(ctx aws.Context, input *AttachNe
return out, req.Send()
}
+const opAttachVerifiedAccessTrustProvider = "AttachVerifiedAccessTrustProvider"
+
+// AttachVerifiedAccessTrustProviderRequest generates a "aws/request.Request" representing the
+// client's request for the AttachVerifiedAccessTrustProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AttachVerifiedAccessTrustProvider for more information on using the AttachVerifiedAccessTrustProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the AttachVerifiedAccessTrustProviderRequest method.
+// req, resp := client.AttachVerifiedAccessTrustProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVerifiedAccessTrustProvider
+func (c *EC2) AttachVerifiedAccessTrustProviderRequest(input *AttachVerifiedAccessTrustProviderInput) (req *request.Request, output *AttachVerifiedAccessTrustProviderOutput) {
+ op := &request.Operation{
+ Name: opAttachVerifiedAccessTrustProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AttachVerifiedAccessTrustProviderInput{}
+ }
+
+ output = &AttachVerifiedAccessTrustProviderOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AttachVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud.
+//
+// A trust provider is a third-party entity that creates, maintains, and manages
+// identity information for users and devices. One or more trust providers can
+// be attached to an Amazon Web Services Verified Access instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AttachVerifiedAccessTrustProvider for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVerifiedAccessTrustProvider
+func (c *EC2) AttachVerifiedAccessTrustProvider(input *AttachVerifiedAccessTrustProviderInput) (*AttachVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.AttachVerifiedAccessTrustProviderRequest(input)
+ return out, req.Send()
+}
+
+// AttachVerifiedAccessTrustProviderWithContext is the same as AttachVerifiedAccessTrustProvider with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AttachVerifiedAccessTrustProvider for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) AttachVerifiedAccessTrustProviderWithContext(ctx aws.Context, input *AttachVerifiedAccessTrustProviderInput, opts ...request.Option) (*AttachVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.AttachVerifiedAccessTrustProviderRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opAttachVolume = "AttachVolume"
// AttachVolumeRequest generates a "aws/request.Request" representing the
@@ -3401,8 +3708,8 @@ func (c *EC2) CancelImageLaunchPermissionRequest(input *CancelImageLaunchPermiss
//
// Removes your Amazon Web Services account from the launch permissions for
// the specified AMI. For more information, see Cancel having an AMI shared
-// with your Amazon Web Services account (https://docs.aws.amazon.com/) in the
-// Amazon Elastic Compute Cloud User Guide.
+// with your Amazon Web Services account (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cancel-sharing-an-AMI.html)
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3627,9 +3934,10 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput
//
// Cancels the specified Spot Fleet requests.
//
-// After you cancel a Spot Fleet request, the Spot Fleet launches no new Spot
-// Instances. You must specify whether the Spot Fleet should also terminate
-// its Spot Instances. If you terminate the instances, the Spot Fleet request
+// After you cancel a Spot Fleet request, the Spot Fleet launches no new instances.
+//
+// You must also specify whether a canceled Spot Fleet request should terminate
+// its instances. If you choose to terminate the instances, the Spot Fleet request
// enters the cancelled_terminating state. Otherwise, the Spot Fleet request
// enters the cancelled_running state and the instances continue to run until
// they are interrupted or you terminate them manually.
@@ -3948,11 +4256,11 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out
// key that you specify in the request using KmsKeyId. Outposts do not support
// unencrypted snapshots. For more information, Amazon EBS local snapshots on
// Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For more information about the prerequisites and limits when copying an AMI,
// see Copy an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5816,6 +6124,81 @@ func (c *EC2) CreateIpamPoolWithContext(ctx aws.Context, input *CreateIpamPoolIn
return out, req.Send()
}
+const opCreateIpamResourceDiscovery = "CreateIpamResourceDiscovery"
+
+// CreateIpamResourceDiscoveryRequest generates a "aws/request.Request" representing the
+// client's request for the CreateIpamResourceDiscovery operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateIpamResourceDiscovery for more information on using the CreateIpamResourceDiscovery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateIpamResourceDiscoveryRequest method.
+// req, resp := client.CreateIpamResourceDiscoveryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateIpamResourceDiscovery
+func (c *EC2) CreateIpamResourceDiscoveryRequest(input *CreateIpamResourceDiscoveryInput) (req *request.Request, output *CreateIpamResourceDiscoveryOutput) {
+ op := &request.Operation{
+ Name: opCreateIpamResourceDiscovery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateIpamResourceDiscoveryInput{}
+ }
+
+ output = &CreateIpamResourceDiscoveryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateIpamResourceDiscovery API operation for Amazon Elastic Compute Cloud.
+//
+// Creates an IPAM resource discovery. A resource discovery is an IPAM component
+// that enables IPAM to manage and monitor resources that belong to the owning
+// account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateIpamResourceDiscovery for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateIpamResourceDiscovery
+func (c *EC2) CreateIpamResourceDiscovery(input *CreateIpamResourceDiscoveryInput) (*CreateIpamResourceDiscoveryOutput, error) {
+ req, out := c.CreateIpamResourceDiscoveryRequest(input)
+ return out, req.Send()
+}
+
+// CreateIpamResourceDiscoveryWithContext is the same as CreateIpamResourceDiscovery with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateIpamResourceDiscovery for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateIpamResourceDiscoveryWithContext(ctx aws.Context, input *CreateIpamResourceDiscoveryInput, opts ...request.Option) (*CreateIpamResourceDiscoveryOutput, error) {
+ req, out := c.CreateIpamResourceDiscoveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opCreateIpamScope = "CreateIpamScope"
// CreateIpamScopeRequest generates a "aws/request.Request" representing the
@@ -6906,7 +7289,7 @@ func (c *EC2) CreateNetworkInsightsPathRequest(input *CreateNetworkInsightsPathI
//
// Reachability Analyzer enables you to analyze and debug network reachability
// between two resources in your virtual private cloud (VPC). For more information,
-// see What is Reachability Analyzer (https://docs.aws.amazon.com/vpc/latest/reachability/).
+// see the Reachability Analyzer Guide (https://docs.aws.amazon.com/vpc/latest/reachability/).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -7478,10 +7861,10 @@ func (c *EC2) CreateRestoreImageTaskRequest(input *CreateRestoreImageTaskInput)
//
// To use this API, you must have the required permissions. For more information,
// see Permissions for storing and restoring AMIs using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For more information, see Store and restore an AMI using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -7848,8 +8231,8 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re
// snapshot. You may remount and use your volume while the snapshot status is
// pending.
//
-// To create a snapshot for Amazon EBS volumes that serve as root devices, you
-// should stop the instance before taking the snapshot.
+// When you create a snapshot for an EBS volume that serves as a root device,
+// we recommend that you stop the instance before taking the snapshot.
//
// Snapshots that are taken from encrypted volumes are automatically encrypted.
// Volumes that are created from encrypted snapshots are also automatically
@@ -8100,10 +8483,10 @@ func (c *EC2) CreateStoreImageTaskRequest(input *CreateStoreImageTaskInput) (req
//
// To use this API, you must have the required permissions. For more information,
// see Permissions for storing and restoring AMIs using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For more information, see Store and restore an AMI using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -8176,18 +8559,23 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques
// CreateSubnet API operation for Amazon Elastic Compute Cloud.
//
-// Creates a subnet in a specified VPC.
+// Creates a subnet in the specified VPC. For an IPv4 only subnet, specify an
+// IPv4 CIDR block. If the VPC has an IPv6 CIDR block, you can create an IPv6
+// only subnet or a dual stack subnet instead. For an IPv6 only subnet, specify
+// an IPv6 CIDR block. For a dual stack subnet, specify both an IPv4 CIDR block
+// and an IPv6 CIDR block.
//
-// You must specify an IPv4 CIDR block for the subnet. After you create a subnet,
-// you can't change its CIDR block. The allowed block size is between a /16
-// netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR
-// block must not overlap with the CIDR block of an existing subnet in the VPC.
+// A subnet CIDR block must not overlap the CIDR block of an existing subnet
+// in the VPC. After you create a subnet, you can't change its CIDR block.
//
-// If you've associated an IPv6 CIDR block with your VPC, you can create a subnet
-// with an IPv6 CIDR block that uses a /64 prefix length.
+// The allowed size for an IPv4 subnet is between a /28 netmask (16 IP addresses)
+// and a /16 netmask (65,536 IP addresses). Amazon Web Services reserves both
+// the first four and the last IPv4 address in each subnet's CIDR block. They're
+// not available for your use.
//
-// Amazon Web Services reserves both the first four and the last IPv4 address
-// in each subnet's CIDR block. They're not available for use.
+// If you've associated an IPv6 CIDR block with your VPC, you can associate
+// an IPv6 CIDR block with a subnet when you create it. The allowed block size
+// for an IPv6 subnet is a /64 netmask.
//
// If you add more than one subnet to a VPC, they're set up in a star topology
// with a logical router in the middle.
@@ -8196,7 +8584,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques
// It's therefore possible to have a subnet with no running instances (they're
// all stopped), but no remaining IP addresses available.
//
-// For more information about subnets, see Your VPC and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
+// For more information, see Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -9563,6 +9951,311 @@ func (c *EC2) CreateTransitGatewayVpcAttachmentWithContext(ctx aws.Context, inpu
return out, req.Send()
}
+const opCreateVerifiedAccessEndpoint = "CreateVerifiedAccessEndpoint"
+
+// CreateVerifiedAccessEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVerifiedAccessEndpoint operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateVerifiedAccessEndpoint for more information on using the CreateVerifiedAccessEndpoint
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateVerifiedAccessEndpointRequest method.
+// req, resp := client.CreateVerifiedAccessEndpointRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessEndpoint
+func (c *EC2) CreateVerifiedAccessEndpointRequest(input *CreateVerifiedAccessEndpointInput) (req *request.Request, output *CreateVerifiedAccessEndpointOutput) {
+ op := &request.Operation{
+ Name: opCreateVerifiedAccessEndpoint,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVerifiedAccessEndpointInput{}
+ }
+
+ output = &CreateVerifiedAccessEndpointOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateVerifiedAccessEndpoint API operation for Amazon Elastic Compute Cloud.
+//
+// An Amazon Web Services Verified Access endpoint is where you define your
+// application along with an optional endpoint-level access policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateVerifiedAccessEndpoint for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessEndpoint
+func (c *EC2) CreateVerifiedAccessEndpoint(input *CreateVerifiedAccessEndpointInput) (*CreateVerifiedAccessEndpointOutput, error) {
+ req, out := c.CreateVerifiedAccessEndpointRequest(input)
+ return out, req.Send()
+}
+
+// CreateVerifiedAccessEndpointWithContext is the same as CreateVerifiedAccessEndpoint with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateVerifiedAccessEndpoint for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateVerifiedAccessEndpointWithContext(ctx aws.Context, input *CreateVerifiedAccessEndpointInput, opts ...request.Option) (*CreateVerifiedAccessEndpointOutput, error) {
+ req, out := c.CreateVerifiedAccessEndpointRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateVerifiedAccessGroup = "CreateVerifiedAccessGroup"
+
+// CreateVerifiedAccessGroupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVerifiedAccessGroup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateVerifiedAccessGroup for more information on using the CreateVerifiedAccessGroup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateVerifiedAccessGroupRequest method.
+// req, resp := client.CreateVerifiedAccessGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessGroup
+func (c *EC2) CreateVerifiedAccessGroupRequest(input *CreateVerifiedAccessGroupInput) (req *request.Request, output *CreateVerifiedAccessGroupOutput) {
+ op := &request.Operation{
+ Name: opCreateVerifiedAccessGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVerifiedAccessGroupInput{}
+ }
+
+ output = &CreateVerifiedAccessGroupOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateVerifiedAccessGroup API operation for Amazon Elastic Compute Cloud.
+//
+// An Amazon Web Services Verified Access group is a collection of Amazon Web
+// Services Verified Access endpoints whose associated applications have similar
+// security requirements. Each instance within an Amazon Web Services Verified
+// Access group shares an Amazon Web Services Verified Access policy. For example,
+// you can group all Amazon Web Services Verified Access instances associated
+// with “sales” applications together and use one common Amazon Web Services
+// Verified Access policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateVerifiedAccessGroup for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessGroup
+func (c *EC2) CreateVerifiedAccessGroup(input *CreateVerifiedAccessGroupInput) (*CreateVerifiedAccessGroupOutput, error) {
+ req, out := c.CreateVerifiedAccessGroupRequest(input)
+ return out, req.Send()
+}
+
+// CreateVerifiedAccessGroupWithContext is the same as CreateVerifiedAccessGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateVerifiedAccessGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateVerifiedAccessGroupWithContext(ctx aws.Context, input *CreateVerifiedAccessGroupInput, opts ...request.Option) (*CreateVerifiedAccessGroupOutput, error) {
+ req, out := c.CreateVerifiedAccessGroupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateVerifiedAccessInstance = "CreateVerifiedAccessInstance"
+
+// CreateVerifiedAccessInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVerifiedAccessInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateVerifiedAccessInstance for more information on using the CreateVerifiedAccessInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateVerifiedAccessInstanceRequest method.
+// req, resp := client.CreateVerifiedAccessInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessInstance
+func (c *EC2) CreateVerifiedAccessInstanceRequest(input *CreateVerifiedAccessInstanceInput) (req *request.Request, output *CreateVerifiedAccessInstanceOutput) {
+ op := &request.Operation{
+ Name: opCreateVerifiedAccessInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVerifiedAccessInstanceInput{}
+ }
+
+ output = &CreateVerifiedAccessInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateVerifiedAccessInstance API operation for Amazon Elastic Compute Cloud.
+//
+// An Amazon Web Services Verified Access instance is a regional entity that
+// evaluates application requests and grants access only when your security
+// requirements are met.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateVerifiedAccessInstance for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessInstance
+func (c *EC2) CreateVerifiedAccessInstance(input *CreateVerifiedAccessInstanceInput) (*CreateVerifiedAccessInstanceOutput, error) {
+ req, out := c.CreateVerifiedAccessInstanceRequest(input)
+ return out, req.Send()
+}
+
+// CreateVerifiedAccessInstanceWithContext is the same as CreateVerifiedAccessInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateVerifiedAccessInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateVerifiedAccessInstanceWithContext(ctx aws.Context, input *CreateVerifiedAccessInstanceInput, opts ...request.Option) (*CreateVerifiedAccessInstanceOutput, error) {
+ req, out := c.CreateVerifiedAccessInstanceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateVerifiedAccessTrustProvider = "CreateVerifiedAccessTrustProvider"
+
+// CreateVerifiedAccessTrustProviderRequest generates a "aws/request.Request" representing the
+// client's request for the CreateVerifiedAccessTrustProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateVerifiedAccessTrustProvider for more information on using the CreateVerifiedAccessTrustProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateVerifiedAccessTrustProviderRequest method.
+// req, resp := client.CreateVerifiedAccessTrustProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessTrustProvider
+func (c *EC2) CreateVerifiedAccessTrustProviderRequest(input *CreateVerifiedAccessTrustProviderInput) (req *request.Request, output *CreateVerifiedAccessTrustProviderOutput) {
+ op := &request.Operation{
+ Name: opCreateVerifiedAccessTrustProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateVerifiedAccessTrustProviderInput{}
+ }
+
+ output = &CreateVerifiedAccessTrustProviderOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud.
+//
+// A trust provider is a third-party entity that creates, maintains, and manages
+// identity information for users and devices. When an application request is
+// made, the identity information sent by the trust provider will be evaluated
+// by Amazon Web Services Verified Access, before allowing or denying the application
+// request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateVerifiedAccessTrustProvider for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVerifiedAccessTrustProvider
+func (c *EC2) CreateVerifiedAccessTrustProvider(input *CreateVerifiedAccessTrustProviderInput) (*CreateVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.CreateVerifiedAccessTrustProviderRequest(input)
+ return out, req.Send()
+}
+
+// CreateVerifiedAccessTrustProviderWithContext is the same as CreateVerifiedAccessTrustProvider with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateVerifiedAccessTrustProvider for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) CreateVerifiedAccessTrustProviderWithContext(ctx aws.Context, input *CreateVerifiedAccessTrustProviderInput, opts ...request.Option) (*CreateVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.CreateVerifiedAccessTrustProviderRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
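+// As a further editorial illustration, a user-identity trust provider might be
+// created as sketched below. The TrustProviderType, UserTrustProviderType and
+// PolicyReferenceName field names and their literal values are assumptions about
+// this SDK version and should be checked against CreateVerifiedAccessTrustProviderInput:
+//
+//	out, err := client.CreateVerifiedAccessTrustProvider(&ec2.CreateVerifiedAccessTrustProviderInput{
+//		TrustProviderType:     aws.String("user"),                // assumed enum value
+//		UserTrustProviderType: aws.String("iam-identity-center"), // assumed enum value
+//		PolicyReferenceName:   aws.String("idc"),                 // name referenced from access policies
+//	})
+//	if err != nil {
+//		fmt.Println(err)
+//	} else {
+//		fmt.Println(out)
+//	}
+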
const opCreateVolume = "CreateVolume"
// CreateVolumeRequest generates a "aws/request.Request" representing the
@@ -9697,10 +10390,8 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out
// CreateVpc API operation for Amazon Elastic Compute Cloud.
//
-// Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can
-// create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16
-// netmask (65,536 IPv4 addresses). For more information about how large to
-// make your VPC, see Your VPC and subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
+// Creates a VPC with the specified CIDR blocks. For more information, see VPC
+// CIDR blocks (https://docs.aws.amazon.com/vpc/latest/userguide/configure-your-vpc.html#vpc-cidr-blocks)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can optionally request an IPv6 CIDR block for the VPC. You can request
@@ -9708,9 +10399,9 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out
// or an IPv6 CIDR block from an IPv6 address pool that you provisioned through
// bring your own IP addresses (BYOIP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)).
//
-// By default, each instance you launch in the VPC has the default DHCP options,
-// which include only a default DNS server that we provide (AmazonProvidedDNS).
-// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+// By default, each instance that you launch in the VPC has the default DHCP
+// options, which include only a default DNS server that we provide (AmazonProvidedDNS).
+// For more information, see DHCP option sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
// in the Amazon Virtual Private Cloud User Guide.
//
// You can specify the instance tenancy value for the VPC when you create it.
@@ -9946,7 +10637,7 @@ func (c *EC2) CreateVpcEndpointServiceConfigurationRequest(input *CreateVpcEndpo
// CreateVpcEndpointServiceConfiguration API operation for Amazon Elastic Compute Cloud.
//
// Creates a VPC endpoint service to which service consumers (Amazon Web Services
-// accounts, IAM users, and IAM roles) can connect.
+// accounts, users, and IAM roles) can connect.
//
// Before you create an endpoint service, you must create one of the following
// for your service:
@@ -10970,11 +11661,11 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques
// DeleteFleets API operation for Amazon Elastic Compute Cloud.
//
-// Deletes the specified EC2 Fleet.
+// Deletes the specified EC2 Fleets.
//
// After you delete an EC2 Fleet, it launches no new instances.
//
-// You must specify whether a deleted EC2 Fleet should also terminate its instances.
+// You must also specify whether a deleted EC2 Fleet should terminate its instances.
// If you choose to terminate the instances, the EC2 Fleet enters the deleted_terminating
// state. Otherwise, the EC2 Fleet enters the deleted_running state, and the
// instances continue to run until they are interrupted or you terminate them
@@ -11478,6 +12169,81 @@ func (c *EC2) DeleteIpamPoolWithContext(ctx aws.Context, input *DeleteIpamPoolIn
return out, req.Send()
}
+const opDeleteIpamResourceDiscovery = "DeleteIpamResourceDiscovery"
+
+// DeleteIpamResourceDiscoveryRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteIpamResourceDiscovery operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteIpamResourceDiscovery for more information on using the DeleteIpamResourceDiscovery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteIpamResourceDiscoveryRequest method.
+// req, resp := client.DeleteIpamResourceDiscoveryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteIpamResourceDiscovery
+func (c *EC2) DeleteIpamResourceDiscoveryRequest(input *DeleteIpamResourceDiscoveryInput) (req *request.Request, output *DeleteIpamResourceDiscoveryOutput) {
+ op := &request.Operation{
+ Name: opDeleteIpamResourceDiscovery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteIpamResourceDiscoveryInput{}
+ }
+
+ output = &DeleteIpamResourceDiscoveryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteIpamResourceDiscovery API operation for Amazon Elastic Compute Cloud.
+//
+// Deletes an IPAM resource discovery. A resource discovery is an IPAM component
+// that enables IPAM to manage and monitor resources that belong to the owning
+// account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteIpamResourceDiscovery for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteIpamResourceDiscovery
+func (c *EC2) DeleteIpamResourceDiscovery(input *DeleteIpamResourceDiscoveryInput) (*DeleteIpamResourceDiscoveryOutput, error) {
+ req, out := c.DeleteIpamResourceDiscoveryRequest(input)
+ return out, req.Send()
+}
+
+// DeleteIpamResourceDiscoveryWithContext is the same as DeleteIpamResourceDiscovery with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteIpamResourceDiscovery for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteIpamResourceDiscoveryWithContext(ctx aws.Context, input *DeleteIpamResourceDiscoveryInput, opts ...request.Option) (*DeleteIpamResourceDiscoveryOutput, error) {
+ req, out := c.DeleteIpamResourceDiscoveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
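+// For context, an editorial sketch of deleting a resource discovery; the
+// IpamResourceDiscoveryId field is an assumption about this SDK version and the
+// identifier is a placeholder:
+//
+//	_, err := client.DeleteIpamResourceDiscovery(&ec2.DeleteIpamResourceDiscoveryInput{
+//		IpamResourceDiscoveryId: aws.String("ipam-res-disco-0123456789abcdef0"), // placeholder ID
+//	})
+//	if err != nil {
+//		fmt.Println(err)
+//	}
+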
const opDeleteIpamScope = "DeleteIpamScope"
// DeleteIpamScopeRequest generates a "aws/request.Request" representing the
@@ -14764,6 +15530,298 @@ func (c *EC2) DeleteTransitGatewayVpcAttachmentWithContext(ctx aws.Context, inpu
return out, req.Send()
}
+const opDeleteVerifiedAccessEndpoint = "DeleteVerifiedAccessEndpoint"
+
+// DeleteVerifiedAccessEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVerifiedAccessEndpoint operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteVerifiedAccessEndpoint for more information on using the DeleteVerifiedAccessEndpoint
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteVerifiedAccessEndpointRequest method.
+// req, resp := client.DeleteVerifiedAccessEndpointRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessEndpoint
+func (c *EC2) DeleteVerifiedAccessEndpointRequest(input *DeleteVerifiedAccessEndpointInput) (req *request.Request, output *DeleteVerifiedAccessEndpointOutput) {
+ op := &request.Operation{
+ Name: opDeleteVerifiedAccessEndpoint,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteVerifiedAccessEndpointInput{}
+ }
+
+ output = &DeleteVerifiedAccessEndpointOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteVerifiedAccessEndpoint API operation for Amazon Elastic Compute Cloud.
+//
+// Delete an Amazon Web Services Verified Access endpoint.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteVerifiedAccessEndpoint for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessEndpoint
+func (c *EC2) DeleteVerifiedAccessEndpoint(input *DeleteVerifiedAccessEndpointInput) (*DeleteVerifiedAccessEndpointOutput, error) {
+ req, out := c.DeleteVerifiedAccessEndpointRequest(input)
+ return out, req.Send()
+}
+
+// DeleteVerifiedAccessEndpointWithContext is the same as DeleteVerifiedAccessEndpoint with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteVerifiedAccessEndpoint for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteVerifiedAccessEndpointWithContext(ctx aws.Context, input *DeleteVerifiedAccessEndpointInput, opts ...request.Option) (*DeleteVerifiedAccessEndpointOutput, error) {
+ req, out := c.DeleteVerifiedAccessEndpointRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteVerifiedAccessGroup = "DeleteVerifiedAccessGroup"
+
+// DeleteVerifiedAccessGroupRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVerifiedAccessGroup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteVerifiedAccessGroup for more information on using the DeleteVerifiedAccessGroup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteVerifiedAccessGroupRequest method.
+// req, resp := client.DeleteVerifiedAccessGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessGroup
+func (c *EC2) DeleteVerifiedAccessGroupRequest(input *DeleteVerifiedAccessGroupInput) (req *request.Request, output *DeleteVerifiedAccessGroupOutput) {
+ op := &request.Operation{
+ Name: opDeleteVerifiedAccessGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteVerifiedAccessGroupInput{}
+ }
+
+ output = &DeleteVerifiedAccessGroupOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteVerifiedAccessGroup API operation for Amazon Elastic Compute Cloud.
+//
+// Delete an Amazon Web Services Verified Access group.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteVerifiedAccessGroup for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessGroup
+func (c *EC2) DeleteVerifiedAccessGroup(input *DeleteVerifiedAccessGroupInput) (*DeleteVerifiedAccessGroupOutput, error) {
+ req, out := c.DeleteVerifiedAccessGroupRequest(input)
+ return out, req.Send()
+}
+
+// DeleteVerifiedAccessGroupWithContext is the same as DeleteVerifiedAccessGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteVerifiedAccessGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteVerifiedAccessGroupWithContext(ctx aws.Context, input *DeleteVerifiedAccessGroupInput, opts ...request.Option) (*DeleteVerifiedAccessGroupOutput, error) {
+ req, out := c.DeleteVerifiedAccessGroupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteVerifiedAccessInstance = "DeleteVerifiedAccessInstance"
+
+// DeleteVerifiedAccessInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVerifiedAccessInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteVerifiedAccessInstance for more information on using the DeleteVerifiedAccessInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteVerifiedAccessInstanceRequest method.
+// req, resp := client.DeleteVerifiedAccessInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessInstance
+func (c *EC2) DeleteVerifiedAccessInstanceRequest(input *DeleteVerifiedAccessInstanceInput) (req *request.Request, output *DeleteVerifiedAccessInstanceOutput) {
+ op := &request.Operation{
+ Name: opDeleteVerifiedAccessInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteVerifiedAccessInstanceInput{}
+ }
+
+ output = &DeleteVerifiedAccessInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteVerifiedAccessInstance API operation for Amazon Elastic Compute Cloud.
+//
+// Delete an Amazon Web Services Verified Access instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteVerifiedAccessInstance for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessInstance
+func (c *EC2) DeleteVerifiedAccessInstance(input *DeleteVerifiedAccessInstanceInput) (*DeleteVerifiedAccessInstanceOutput, error) {
+ req, out := c.DeleteVerifiedAccessInstanceRequest(input)
+ return out, req.Send()
+}
+
+// DeleteVerifiedAccessInstanceWithContext is the same as DeleteVerifiedAccessInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteVerifiedAccessInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteVerifiedAccessInstanceWithContext(ctx aws.Context, input *DeleteVerifiedAccessInstanceInput, opts ...request.Option) (*DeleteVerifiedAccessInstanceOutput, error) {
+ req, out := c.DeleteVerifiedAccessInstanceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteVerifiedAccessTrustProvider = "DeleteVerifiedAccessTrustProvider"
+
+// DeleteVerifiedAccessTrustProviderRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVerifiedAccessTrustProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteVerifiedAccessTrustProvider for more information on using the DeleteVerifiedAccessTrustProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteVerifiedAccessTrustProviderRequest method.
+// req, resp := client.DeleteVerifiedAccessTrustProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessTrustProvider
+func (c *EC2) DeleteVerifiedAccessTrustProviderRequest(input *DeleteVerifiedAccessTrustProviderInput) (req *request.Request, output *DeleteVerifiedAccessTrustProviderOutput) {
+ op := &request.Operation{
+ Name: opDeleteVerifiedAccessTrustProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteVerifiedAccessTrustProviderInput{}
+ }
+
+ output = &DeleteVerifiedAccessTrustProviderOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud.
+//
+// Delete an Amazon Web Services Verified Access trust provider.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DeleteVerifiedAccessTrustProvider for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVerifiedAccessTrustProvider
+func (c *EC2) DeleteVerifiedAccessTrustProvider(input *DeleteVerifiedAccessTrustProviderInput) (*DeleteVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.DeleteVerifiedAccessTrustProviderRequest(input)
+ return out, req.Send()
+}
+
+// DeleteVerifiedAccessTrustProviderWithContext is the same as DeleteVerifiedAccessTrustProvider with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteVerifiedAccessTrustProvider for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DeleteVerifiedAccessTrustProviderWithContext(ctx aws.Context, input *DeleteVerifiedAccessTrustProviderInput, opts ...request.Option) (*DeleteVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.DeleteVerifiedAccessTrustProviderRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
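+// An editorial sketch of tearing down the Verified Access resources above; the
+// identifiers are placeholders, the *Id field names are assumptions about this
+// SDK version, and the ordering (endpoints first, then groups, then the instance
+// and trust provider) only reflects the dependency between the resources:
+//
+//	_, err := client.DeleteVerifiedAccessGroup(&ec2.DeleteVerifiedAccessGroupInput{
+//		VerifiedAccessGroupId: aws.String("vagr-0123456789abcdef0"), // placeholder group ID
+//	})
+//	if err == nil {
+//		_, err = client.DeleteVerifiedAccessInstance(&ec2.DeleteVerifiedAccessInstanceInput{
+//			VerifiedAccessInstanceId: aws.String("vai-0123456789abcdef0"), // placeholder instance ID
+//		})
+//	}
+//	if err != nil {
+//		fmt.Println(err)
+//	}
+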
const opDeleteVolume = "DeleteVolume"
// DeleteVolumeRequest generates a "aws/request.Request" representing the
@@ -14965,7 +16023,7 @@ func (c *EC2) DeleteVpcEndpointConnectionNotificationsRequest(input *DeleteVpcEn
// DeleteVpcEndpointConnectionNotifications API operation for Amazon Elastic Compute Cloud.
//
-// Deletes one or more VPC endpoint connection notifications.
+// Deletes the specified VPC endpoint connection notifications.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -15038,10 +16096,10 @@ func (c *EC2) DeleteVpcEndpointServiceConfigurationsRequest(input *DeleteVpcEndp
// DeleteVpcEndpointServiceConfigurations API operation for Amazon Elastic Compute Cloud.
//
-// Deletes one or more VPC endpoint service configurations in your account.
-// Before you delete the endpoint service configuration, you must reject any
-// Available or PendingAcceptance interface endpoint connections that are attached
-// to the service.
+// Deletes the specified VPC endpoint service configurations. Before you can
+// delete an endpoint service configuration, you must reject any Available or
+// PendingAcceptance interface endpoint connections that are attached to the
+// service.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -15114,26 +16172,16 @@ func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *re
// DeleteVpcEndpoints API operation for Amazon Elastic Compute Cloud.
//
-// Deletes one or more specified VPC endpoints. You can delete any of the following
-// types of VPC endpoints.
-//
-// - Gateway endpoint,
-//
-// - Gateway Load Balancer endpoint,
-//
-// - Interface endpoint
+// Deletes the specified VPC endpoints.
//
-// The following rules apply when you delete a VPC endpoint:
+// When you delete a gateway endpoint, we delete the endpoint routes in the
+// route tables for the endpoint.
//
-// - When you delete a gateway endpoint, we delete the endpoint routes in
-// the route tables that are associated with the endpoint.
+// When you delete a Gateway Load Balancer endpoint, we delete its endpoint
+// network interfaces. You can only delete Gateway Load Balancer endpoints when
+// the routes that are associated with the endpoint are deleted.
//
-// - When you delete a Gateway Load Balancer endpoint, we delete the endpoint
-// network interfaces. You can only delete Gateway Load Balancer endpoints
-// when the routes that are associated with the endpoint are deleted.
-//
-// - When you delete an interface endpoint, we delete the endpoint network
-// interfaces.
+// When you delete an interface endpoint, we delete its endpoint network interfaces.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -15759,7 +16807,7 @@ func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.
// If you deregister an AMI that matches a Recycle Bin retention rule, the AMI
// is retained in the Recycle Bin for the specified retention period. For more
// information, see Recycle Bin (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// When you deregister an AMI, it doesn't affect any instances that you've already
// launched from the AMI. You'll continue to incur usage costs for those instances
@@ -16677,7 +17725,7 @@ func (c *EC2) DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(input *Des
// DescribeAwsNetworkPerformanceMetricSubscriptions API operation for Amazon Elastic Compute Cloud.
//
-// Describes the curent Infrastructure Performance metric subscriptions.
+// Describes the current Infrastructure Performance metric subscriptions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -20680,6 +21728,12 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re
Name: opDescribeImages,
HTTPMethod: "POST",
HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
}
if input == nil {
@@ -20733,6 +21787,57 @@ func (c *EC2) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesIn
return out, req.Send()
}
+// DescribeImagesPages iterates over the pages of a DescribeImages operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeImages method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeImages operation.
+// pageNum := 0
+// err := client.DescribeImagesPages(params,
+// func(page *ec2.DescribeImagesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error {
+ return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeImagesPagesWithContext same as DescribeImagesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeImagesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeImagesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
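+// The new paginator can be exercised through DescribeImagesPages. The sketch
+// below is an editorial addition, assuming the Owners filter and the Images and
+// ImageId fields of the output (which predate this change); it collects image
+// IDs across all pages:
+//
+//	var ids []string
+//	err := client.DescribeImagesPages(&ec2.DescribeImagesInput{
+//		Owners: []*string{aws.String("self")},
+//	}, func(page *ec2.DescribeImagesOutput, lastPage bool) bool {
+//		for _, img := range page.Images {
+//			ids = append(ids, aws.StringValue(img.ImageId))
+//		}
+//		return true // keep requesting pages while NextToken is set
+//	})
+//	if err == nil {
+//		fmt.Println(len(ids), "images")
+//	}
+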
const opDescribeImportImageTasks = "DescribeImportImageTasks"
// DescribeImportImageTasksRequest generates a "aws/request.Request" representing the
@@ -22261,6 +23366,270 @@ func (c *EC2) DescribeIpamPoolsPagesWithContext(ctx aws.Context, input *Describe
return p.Err()
}
+const opDescribeIpamResourceDiscoveries = "DescribeIpamResourceDiscoveries"
+
+// DescribeIpamResourceDiscoveriesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeIpamResourceDiscoveries operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeIpamResourceDiscoveries for more information on using the DescribeIpamResourceDiscoveries
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeIpamResourceDiscoveriesRequest method.
+// req, resp := client.DescribeIpamResourceDiscoveriesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamResourceDiscoveries
+func (c *EC2) DescribeIpamResourceDiscoveriesRequest(input *DescribeIpamResourceDiscoveriesInput) (req *request.Request, output *DescribeIpamResourceDiscoveriesOutput) {
+ op := &request.Operation{
+ Name: opDescribeIpamResourceDiscoveries,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeIpamResourceDiscoveriesInput{}
+ }
+
+ output = &DescribeIpamResourceDiscoveriesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeIpamResourceDiscoveries API operation for Amazon Elastic Compute Cloud.
+//
+// Describes IPAM resource discoveries. A resource discovery is an IPAM component
+// that enables IPAM to manage and monitor resources that belong to the owning
+// account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeIpamResourceDiscoveries for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamResourceDiscoveries
+func (c *EC2) DescribeIpamResourceDiscoveries(input *DescribeIpamResourceDiscoveriesInput) (*DescribeIpamResourceDiscoveriesOutput, error) {
+ req, out := c.DescribeIpamResourceDiscoveriesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeIpamResourceDiscoveriesWithContext is the same as DescribeIpamResourceDiscoveries with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeIpamResourceDiscoveries for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeIpamResourceDiscoveriesWithContext(ctx aws.Context, input *DescribeIpamResourceDiscoveriesInput, opts ...request.Option) (*DescribeIpamResourceDiscoveriesOutput, error) {
+ req, out := c.DescribeIpamResourceDiscoveriesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeIpamResourceDiscoveriesPages iterates over the pages of a DescribeIpamResourceDiscoveries operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeIpamResourceDiscoveries method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeIpamResourceDiscoveries operation.
+// pageNum := 0
+// err := client.DescribeIpamResourceDiscoveriesPages(params,
+// func(page *ec2.DescribeIpamResourceDiscoveriesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeIpamResourceDiscoveriesPages(input *DescribeIpamResourceDiscoveriesInput, fn func(*DescribeIpamResourceDiscoveriesOutput, bool) bool) error {
+ return c.DescribeIpamResourceDiscoveriesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeIpamResourceDiscoveriesPagesWithContext same as DescribeIpamResourceDiscoveriesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeIpamResourceDiscoveriesPagesWithContext(ctx aws.Context, input *DescribeIpamResourceDiscoveriesInput, fn func(*DescribeIpamResourceDiscoveriesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeIpamResourceDiscoveriesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeIpamResourceDiscoveriesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeIpamResourceDiscoveriesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
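+// As an editorial illustration of the new paginator, the sketch below walks the
+// resource discoveries visible to the account; the IpamResourceDiscoveries field
+// of each page is an assumption about this SDK version:
+//
+//	err := client.DescribeIpamResourceDiscoveriesPages(&ec2.DescribeIpamResourceDiscoveriesInput{},
+//		func(page *ec2.DescribeIpamResourceDiscoveriesOutput, lastPage bool) bool {
+//			fmt.Println(len(page.IpamResourceDiscoveries), "resource discoveries on this page")
+//			return true // keep requesting pages while NextToken is set
+//		})
+//	if err != nil {
+//		fmt.Println(err)
+//	}
+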
+const opDescribeIpamResourceDiscoveryAssociations = "DescribeIpamResourceDiscoveryAssociations"
+
+// DescribeIpamResourceDiscoveryAssociationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeIpamResourceDiscoveryAssociations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeIpamResourceDiscoveryAssociations for more information on using the DescribeIpamResourceDiscoveryAssociations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeIpamResourceDiscoveryAssociationsRequest method.
+// req, resp := client.DescribeIpamResourceDiscoveryAssociationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamResourceDiscoveryAssociations
+func (c *EC2) DescribeIpamResourceDiscoveryAssociationsRequest(input *DescribeIpamResourceDiscoveryAssociationsInput) (req *request.Request, output *DescribeIpamResourceDiscoveryAssociationsOutput) {
+ op := &request.Operation{
+ Name: opDescribeIpamResourceDiscoveryAssociations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeIpamResourceDiscoveryAssociationsInput{}
+ }
+
+ output = &DescribeIpamResourceDiscoveryAssociationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeIpamResourceDiscoveryAssociations API operation for Amazon Elastic Compute Cloud.
+//
+// Describes resource discovery associations with an Amazon VPC IPAM. An associated
+// resource discovery is a resource discovery that has been associated with
+// an IPAM.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeIpamResourceDiscoveryAssociations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamResourceDiscoveryAssociations
+func (c *EC2) DescribeIpamResourceDiscoveryAssociations(input *DescribeIpamResourceDiscoveryAssociationsInput) (*DescribeIpamResourceDiscoveryAssociationsOutput, error) {
+ req, out := c.DescribeIpamResourceDiscoveryAssociationsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeIpamResourceDiscoveryAssociationsWithContext is the same as DescribeIpamResourceDiscoveryAssociations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeIpamResourceDiscoveryAssociations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeIpamResourceDiscoveryAssociationsWithContext(ctx aws.Context, input *DescribeIpamResourceDiscoveryAssociationsInput, opts ...request.Option) (*DescribeIpamResourceDiscoveryAssociationsOutput, error) {
+ req, out := c.DescribeIpamResourceDiscoveryAssociationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeIpamResourceDiscoveryAssociationsPages iterates over the pages of a DescribeIpamResourceDiscoveryAssociations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeIpamResourceDiscoveryAssociations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeIpamResourceDiscoveryAssociations operation.
+// pageNum := 0
+// err := client.DescribeIpamResourceDiscoveryAssociationsPages(params,
+// func(page *ec2.DescribeIpamResourceDiscoveryAssociationsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeIpamResourceDiscoveryAssociationsPages(input *DescribeIpamResourceDiscoveryAssociationsInput, fn func(*DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool) error {
+ return c.DescribeIpamResourceDiscoveryAssociationsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeIpamResourceDiscoveryAssociationsPagesWithContext same as DescribeIpamResourceDiscoveryAssociationsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeIpamResourceDiscoveryAssociationsPagesWithContext(ctx aws.Context, input *DescribeIpamResourceDiscoveryAssociationsInput, fn func(*DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeIpamResourceDiscoveryAssociationsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeIpamResourceDiscoveryAssociationsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeIpamResourceDiscoveryAssociationsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeIpamScopes = "DescribeIpamScopes"
// DescribeIpamScopesRequest generates a "aws/request.Request" representing the
@@ -27349,11 +28718,8 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ
// own or have explicit permissions, or all for public snapshots.
//
// If you are describing a long list of snapshots, we recommend that you paginate
-// the output to make the list more manageable. The MaxResults parameter sets
-// the maximum number of results returned in a single page. If the list of results
-// exceeds your MaxResults value, then that number of results is returned along
-// with a NextToken value that can be passed to a subsequent DescribeSnapshots
-// request to retrieve the remaining results.
+// the output to make the list more manageable. For more information, see Pagination
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
//
// To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.
//
@@ -27859,11 +29225,11 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq
// with a filter to look for instances where the instance lifecycle is spot.
//
// We recommend that you set MaxResults to a value between 5 and 1000 to limit
-// the number of results returned. This paginates the output, which makes the
-// list more manageable and returns the results faster. If the list of results
-// exceeds your MaxResults value, then that number of results is returned along
-// with a NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests
-// request to retrieve the remaining results.
+// the number of items returned. This paginates the output, which makes the
+// list more manageable and returns the items faster. If the list of items exceeds
+// your MaxResults value, then that number of items is returned along with a
+// NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests
+// request to retrieve the remaining items.
//
// Spot Instance requests are deleted four hours after they are canceled and
// their instances are terminated.
@@ -28279,10 +29645,10 @@ func (c *EC2) DescribeStoreImageTasksRequest(input *DescribeStoreImageTasksInput
//
// To use this API, you must have the required permissions. For more information,
// see Permissions for storing and restoring AMIs using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// For more information, see Store and restore an AMI using Amazon S3 (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -30459,6 +31825,657 @@ func (c *EC2) DescribeTrunkInterfaceAssociationsPagesWithContext(ctx aws.Context
return p.Err()
}
+const opDescribeVerifiedAccessEndpoints = "DescribeVerifiedAccessEndpoints"
+
+// DescribeVerifiedAccessEndpointsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVerifiedAccessEndpoints operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVerifiedAccessEndpoints for more information on using the DescribeVerifiedAccessEndpoints
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeVerifiedAccessEndpointsRequest method.
+// req, resp := client.DescribeVerifiedAccessEndpointsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessEndpoints
+func (c *EC2) DescribeVerifiedAccessEndpointsRequest(input *DescribeVerifiedAccessEndpointsInput) (req *request.Request, output *DescribeVerifiedAccessEndpointsOutput) {
+ op := &request.Operation{
+ Name: opDescribeVerifiedAccessEndpoints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeVerifiedAccessEndpointsInput{}
+ }
+
+ output = &DescribeVerifiedAccessEndpointsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeVerifiedAccessEndpoints API operation for Amazon Elastic Compute Cloud.
+//
+// Describe Amazon Web Services Verified Access endpoints.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeVerifiedAccessEndpoints for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessEndpoints
+func (c *EC2) DescribeVerifiedAccessEndpoints(input *DescribeVerifiedAccessEndpointsInput) (*DescribeVerifiedAccessEndpointsOutput, error) {
+ req, out := c.DescribeVerifiedAccessEndpointsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessEndpointsWithContext is the same as DescribeVerifiedAccessEndpoints with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVerifiedAccessEndpoints for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessEndpointsWithContext(ctx aws.Context, input *DescribeVerifiedAccessEndpointsInput, opts ...request.Option) (*DescribeVerifiedAccessEndpointsOutput, error) {
+ req, out := c.DescribeVerifiedAccessEndpointsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessEndpointsPages iterates over the pages of a DescribeVerifiedAccessEndpoints operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeVerifiedAccessEndpoints method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeVerifiedAccessEndpoints operation.
+// pageNum := 0
+// err := client.DescribeVerifiedAccessEndpointsPages(params,
+// func(page *ec2.DescribeVerifiedAccessEndpointsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeVerifiedAccessEndpointsPages(input *DescribeVerifiedAccessEndpointsInput, fn func(*DescribeVerifiedAccessEndpointsOutput, bool) bool) error {
+ return c.DescribeVerifiedAccessEndpointsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeVerifiedAccessEndpointsPagesWithContext same as DescribeVerifiedAccessEndpointsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessEndpointsPagesWithContext(ctx aws.Context, input *DescribeVerifiedAccessEndpointsInput, fn func(*DescribeVerifiedAccessEndpointsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeVerifiedAccessEndpointsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeVerifiedAccessEndpointsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeVerifiedAccessEndpointsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
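+// An editorial sketch of paging through the endpoints of a single group; the
+// VerifiedAccessGroupId filter and the VerifiedAccessEndpoints field of each
+// page are assumptions about this SDK version, and the group ID is a placeholder:
+//
+//	err := client.DescribeVerifiedAccessEndpointsPages(&ec2.DescribeVerifiedAccessEndpointsInput{
+//		VerifiedAccessGroupId: aws.String("vagr-0123456789abcdef0"), // placeholder group ID
+//	}, func(page *ec2.DescribeVerifiedAccessEndpointsOutput, lastPage bool) bool {
+//		fmt.Println(len(page.VerifiedAccessEndpoints), "endpoints on this page")
+//		return true // continue until the last page
+//	})
+//	if err != nil {
+//		fmt.Println(err)
+//	}
+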
+const opDescribeVerifiedAccessGroups = "DescribeVerifiedAccessGroups"
+
+// DescribeVerifiedAccessGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVerifiedAccessGroups operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVerifiedAccessGroups for more information on using the DescribeVerifiedAccessGroups
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeVerifiedAccessGroupsRequest method.
+// req, resp := client.DescribeVerifiedAccessGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessGroups
+func (c *EC2) DescribeVerifiedAccessGroupsRequest(input *DescribeVerifiedAccessGroupsInput) (req *request.Request, output *DescribeVerifiedAccessGroupsOutput) {
+ op := &request.Operation{
+ Name: opDescribeVerifiedAccessGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeVerifiedAccessGroupsInput{}
+ }
+
+ output = &DescribeVerifiedAccessGroupsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeVerifiedAccessGroups API operation for Amazon Elastic Compute Cloud.
+//
+// Describe details of existing Verified Access groups.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeVerifiedAccessGroups for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessGroups
+func (c *EC2) DescribeVerifiedAccessGroups(input *DescribeVerifiedAccessGroupsInput) (*DescribeVerifiedAccessGroupsOutput, error) {
+ req, out := c.DescribeVerifiedAccessGroupsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessGroupsWithContext is the same as DescribeVerifiedAccessGroups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVerifiedAccessGroups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessGroupsWithContext(ctx aws.Context, input *DescribeVerifiedAccessGroupsInput, opts ...request.Option) (*DescribeVerifiedAccessGroupsOutput, error) {
+ req, out := c.DescribeVerifiedAccessGroupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessGroupsPages iterates over the pages of a DescribeVerifiedAccessGroups operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeVerifiedAccessGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeVerifiedAccessGroups operation.
+// pageNum := 0
+// err := client.DescribeVerifiedAccessGroupsPages(params,
+// func(page *ec2.DescribeVerifiedAccessGroupsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeVerifiedAccessGroupsPages(input *DescribeVerifiedAccessGroupsInput, fn func(*DescribeVerifiedAccessGroupsOutput, bool) bool) error {
+ return c.DescribeVerifiedAccessGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeVerifiedAccessGroupsPagesWithContext is the same as DescribeVerifiedAccessGroupsPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessGroupsPagesWithContext(ctx aws.Context, input *DescribeVerifiedAccessGroupsInput, fn func(*DescribeVerifiedAccessGroupsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeVerifiedAccessGroupsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeVerifiedAccessGroupsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeVerifiedAccessGroupsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opDescribeVerifiedAccessInstanceLoggingConfigurations = "DescribeVerifiedAccessInstanceLoggingConfigurations"
+
+// DescribeVerifiedAccessInstanceLoggingConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVerifiedAccessInstanceLoggingConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVerifiedAccessInstanceLoggingConfigurations for more information on using the DescribeVerifiedAccessInstanceLoggingConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DescribeVerifiedAccessInstanceLoggingConfigurationsRequest method.
+// req, resp := client.DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessInstanceLoggingConfigurations
+func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(input *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (req *request.Request, output *DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opDescribeVerifiedAccessInstanceLoggingConfigurations,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeVerifiedAccessInstanceLoggingConfigurationsInput{}
+ }
+
+ output = &DescribeVerifiedAccessInstanceLoggingConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeVerifiedAccessInstanceLoggingConfigurations API operation for Amazon Elastic Compute Cloud.
+//
+// Describes the current logging configuration for the Amazon Web Services Verified
+// Access instances.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeVerifiedAccessInstanceLoggingConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessInstanceLoggingConfigurations
+func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurations(input *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (*DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error) {
+ req, out := c.DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessInstanceLoggingConfigurationsWithContext is the same as DescribeVerifiedAccessInstanceLoggingConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVerifiedAccessInstanceLoggingConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurationsWithContext(ctx aws.Context, input *DescribeVerifiedAccessInstanceLoggingConfigurationsInput, opts ...request.Option) (*DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error) {
+ req, out := c.DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessInstanceLoggingConfigurationsPages iterates over the pages of a DescribeVerifiedAccessInstanceLoggingConfigurations operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeVerifiedAccessInstanceLoggingConfigurations method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeVerifiedAccessInstanceLoggingConfigurations operation.
+// pageNum := 0
+// err := client.DescribeVerifiedAccessInstanceLoggingConfigurationsPages(params,
+// func(page *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurationsPages(input *DescribeVerifiedAccessInstanceLoggingConfigurationsInput, fn func(*DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool) error {
+ return c.DescribeVerifiedAccessInstanceLoggingConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeVerifiedAccessInstanceLoggingConfigurationsPagesWithContext is the same as DescribeVerifiedAccessInstanceLoggingConfigurationsPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessInstanceLoggingConfigurationsPagesWithContext(ctx aws.Context, input *DescribeVerifiedAccessInstanceLoggingConfigurationsInput, fn func(*DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeVerifiedAccessInstanceLoggingConfigurationsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeVerifiedAccessInstanceLoggingConfigurationsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opDescribeVerifiedAccessInstances = "DescribeVerifiedAccessInstances"
+
+// DescribeVerifiedAccessInstancesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVerifiedAccessInstances operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVerifiedAccessInstances for more information on using the DescribeVerifiedAccessInstances
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DescribeVerifiedAccessInstancesRequest method.
+// req, resp := client.DescribeVerifiedAccessInstancesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessInstances
+func (c *EC2) DescribeVerifiedAccessInstancesRequest(input *DescribeVerifiedAccessInstancesInput) (req *request.Request, output *DescribeVerifiedAccessInstancesOutput) {
+ op := &request.Operation{
+ Name: opDescribeVerifiedAccessInstances,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeVerifiedAccessInstancesInput{}
+ }
+
+ output = &DescribeVerifiedAccessInstancesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeVerifiedAccessInstances API operation for Amazon Elastic Compute Cloud.
+//
+// Describe Verified Access instances.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeVerifiedAccessInstances for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessInstances
+func (c *EC2) DescribeVerifiedAccessInstances(input *DescribeVerifiedAccessInstancesInput) (*DescribeVerifiedAccessInstancesOutput, error) {
+ req, out := c.DescribeVerifiedAccessInstancesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessInstancesWithContext is the same as DescribeVerifiedAccessInstances with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVerifiedAccessInstances for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessInstancesWithContext(ctx aws.Context, input *DescribeVerifiedAccessInstancesInput, opts ...request.Option) (*DescribeVerifiedAccessInstancesOutput, error) {
+ req, out := c.DescribeVerifiedAccessInstancesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessInstancesPages iterates over the pages of a DescribeVerifiedAccessInstances operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeVerifiedAccessInstances method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeVerifiedAccessInstances operation.
+// pageNum := 0
+// err := client.DescribeVerifiedAccessInstancesPages(params,
+// func(page *ec2.DescribeVerifiedAccessInstancesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeVerifiedAccessInstancesPages(input *DescribeVerifiedAccessInstancesInput, fn func(*DescribeVerifiedAccessInstancesOutput, bool) bool) error {
+ return c.DescribeVerifiedAccessInstancesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeVerifiedAccessInstancesPagesWithContext is the same as DescribeVerifiedAccessInstancesPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessInstancesPagesWithContext(ctx aws.Context, input *DescribeVerifiedAccessInstancesInput, fn func(*DescribeVerifiedAccessInstancesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeVerifiedAccessInstancesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeVerifiedAccessInstancesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeVerifiedAccessInstancesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opDescribeVerifiedAccessTrustProviders = "DescribeVerifiedAccessTrustProviders"
+
+// DescribeVerifiedAccessTrustProvidersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVerifiedAccessTrustProviders operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVerifiedAccessTrustProviders for more information on using the DescribeVerifiedAccessTrustProviders
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DescribeVerifiedAccessTrustProvidersRequest method.
+// req, resp := client.DescribeVerifiedAccessTrustProvidersRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessTrustProviders
+func (c *EC2) DescribeVerifiedAccessTrustProvidersRequest(input *DescribeVerifiedAccessTrustProvidersInput) (req *request.Request, output *DescribeVerifiedAccessTrustProvidersOutput) {
+ op := &request.Operation{
+ Name: opDescribeVerifiedAccessTrustProviders,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeVerifiedAccessTrustProvidersInput{}
+ }
+
+ output = &DescribeVerifiedAccessTrustProvidersOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeVerifiedAccessTrustProviders API operation for Amazon Elastic Compute Cloud.
+//
+// Describe details of existing Verified Access trust providers.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DescribeVerifiedAccessTrustProviders for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVerifiedAccessTrustProviders
+func (c *EC2) DescribeVerifiedAccessTrustProviders(input *DescribeVerifiedAccessTrustProvidersInput) (*DescribeVerifiedAccessTrustProvidersOutput, error) {
+ req, out := c.DescribeVerifiedAccessTrustProvidersRequest(input)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessTrustProvidersWithContext is the same as DescribeVerifiedAccessTrustProviders with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVerifiedAccessTrustProviders for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessTrustProvidersWithContext(ctx aws.Context, input *DescribeVerifiedAccessTrustProvidersInput, opts ...request.Option) (*DescribeVerifiedAccessTrustProvidersOutput, error) {
+ req, out := c.DescribeVerifiedAccessTrustProvidersRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeVerifiedAccessTrustProvidersPages iterates over the pages of a DescribeVerifiedAccessTrustProviders operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeVerifiedAccessTrustProviders method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeVerifiedAccessTrustProviders operation.
+// pageNum := 0
+// err := client.DescribeVerifiedAccessTrustProvidersPages(params,
+// func(page *ec2.DescribeVerifiedAccessTrustProvidersOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) DescribeVerifiedAccessTrustProvidersPages(input *DescribeVerifiedAccessTrustProvidersInput, fn func(*DescribeVerifiedAccessTrustProvidersOutput, bool) bool) error {
+ return c.DescribeVerifiedAccessTrustProvidersPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeVerifiedAccessTrustProvidersPagesWithContext is the same as DescribeVerifiedAccessTrustProvidersPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DescribeVerifiedAccessTrustProvidersPagesWithContext(ctx aws.Context, input *DescribeVerifiedAccessTrustProvidersInput, fn func(*DescribeVerifiedAccessTrustProvidersOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeVerifiedAccessTrustProvidersInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeVerifiedAccessTrustProvidersRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeVerifiedAccessTrustProvidersOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeVolumeAttribute = "DescribeVolumeAttribute"
// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the
@@ -30753,11 +32770,8 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.
// Describes the specified EBS volumes or all of your EBS volumes.
//
// If you are describing a long list of volumes, we recommend that you paginate
-// the output to make the list more manageable. The MaxResults parameter sets
-// the maximum number of results returned in a single page. If the list of results
-// exceeds your MaxResults value, then that number of results is returned along
-// with a NextToken value that can be passed to a subsequent DescribeVolumes
-// request to retrieve the remaining results.
+// the output to make the list more manageable. For more information, see Pagination
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
//
// For more information about EBS volumes, see Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html)
// in the Amazon Elastic Compute Cloud User Guide.
@@ -31925,7 +33939,7 @@ func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req
// DescribeVpcEndpoints API operation for Amazon Elastic Compute Cloud.
//
-// Describes one or more of your VPC endpoints.
+// Describes your VPC endpoints.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -32647,6 +34661,79 @@ func (c *EC2) DetachNetworkInterfaceWithContext(ctx aws.Context, input *DetachNe
return out, req.Send()
}
+const opDetachVerifiedAccessTrustProvider = "DetachVerifiedAccessTrustProvider"
+
+// DetachVerifiedAccessTrustProviderRequest generates a "aws/request.Request" representing the
+// client's request for the DetachVerifiedAccessTrustProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DetachVerifiedAccessTrustProvider for more information on using the DetachVerifiedAccessTrustProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DetachVerifiedAccessTrustProviderRequest method.
+// req, resp := client.DetachVerifiedAccessTrustProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVerifiedAccessTrustProvider
+func (c *EC2) DetachVerifiedAccessTrustProviderRequest(input *DetachVerifiedAccessTrustProviderInput) (req *request.Request, output *DetachVerifiedAccessTrustProviderOutput) {
+ op := &request.Operation{
+ Name: opDetachVerifiedAccessTrustProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DetachVerifiedAccessTrustProviderInput{}
+ }
+
+ output = &DetachVerifiedAccessTrustProviderOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DetachVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud.
+//
+// Detach a trust provider from an Amazon Web Services Verified Access instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DetachVerifiedAccessTrustProvider for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVerifiedAccessTrustProvider
+func (c *EC2) DetachVerifiedAccessTrustProvider(input *DetachVerifiedAccessTrustProviderInput) (*DetachVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.DetachVerifiedAccessTrustProviderRequest(input)
+ return out, req.Send()
+}
+
+// DetachVerifiedAccessTrustProviderWithContext is the same as DetachVerifiedAccessTrustProvider with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DetachVerifiedAccessTrustProvider for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DetachVerifiedAccessTrustProviderWithContext(ctx aws.Context, input *DetachVerifiedAccessTrustProviderInput, opts ...request.Option) (*DetachVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.DetachVerifiedAccessTrustProviderRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
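+
+// The awserr guidance above can be applied with a runtime type assertion. A
+// minimal sketch (assuming a constructed *EC2 client named client and the
+// usual aws-sdk-go imports; the input field names and IDs shown are
+// assumptions for illustration):
+//
+// _, err := client.DetachVerifiedAccessTrustProvider(&ec2.DetachVerifiedAccessTrustProviderInput{
+// VerifiedAccessInstanceId: aws.String("vai-0123456789abcdef0"),
+// VerifiedAccessTrustProviderId: aws.String("vatp-0123456789abcdef0"),
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+// fmt.Println(aerr.Code(), aerr.Message())
+// }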
+
const opDetachVolume = "DetachVolume"
// DetachVolumeRequest generates a "aws/request.Request" representing the
@@ -33242,7 +35329,7 @@ func (c *EC2) DisableImageDeprecationRequest(input *DisableImageDeprecationInput
// Cancels the deprecation of the specified AMI.
//
// For more information, see Deprecate an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -34132,6 +36219,167 @@ func (c *EC2) DisassociateInstanceEventWindowWithContext(ctx aws.Context, input
return out, req.Send()
}
+const opDisassociateIpamResourceDiscovery = "DisassociateIpamResourceDiscovery"
+
+// DisassociateIpamResourceDiscoveryRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateIpamResourceDiscovery operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DisassociateIpamResourceDiscovery for more information on using the DisassociateIpamResourceDiscovery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DisassociateIpamResourceDiscoveryRequest method.
+// req, resp := client.DisassociateIpamResourceDiscoveryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIpamResourceDiscovery
+func (c *EC2) DisassociateIpamResourceDiscoveryRequest(input *DisassociateIpamResourceDiscoveryInput) (req *request.Request, output *DisassociateIpamResourceDiscoveryOutput) {
+ op := &request.Operation{
+ Name: opDisassociateIpamResourceDiscovery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisassociateIpamResourceDiscoveryInput{}
+ }
+
+ output = &DisassociateIpamResourceDiscoveryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisassociateIpamResourceDiscovery API operation for Amazon Elastic Compute Cloud.
+//
+// Disassociates a resource discovery from an Amazon VPC IPAM. A resource discovery
+// is an IPAM component that enables IPAM to manage and monitor resources that
+// belong to the owning account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisassociateIpamResourceDiscovery for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIpamResourceDiscovery
+func (c *EC2) DisassociateIpamResourceDiscovery(input *DisassociateIpamResourceDiscoveryInput) (*DisassociateIpamResourceDiscoveryOutput, error) {
+ req, out := c.DisassociateIpamResourceDiscoveryRequest(input)
+ return out, req.Send()
+}
+
+// DisassociateIpamResourceDiscoveryWithContext is the same as DisassociateIpamResourceDiscovery with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisassociateIpamResourceDiscovery for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisassociateIpamResourceDiscoveryWithContext(ctx aws.Context, input *DisassociateIpamResourceDiscoveryInput, opts ...request.Option) (*DisassociateIpamResourceDiscoveryOutput, error) {
+ req, out := c.DisassociateIpamResourceDiscoveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDisassociateNatGatewayAddress = "DisassociateNatGatewayAddress"
+
+// DisassociateNatGatewayAddressRequest generates a "aws/request.Request" representing the
+// client's request for the DisassociateNatGatewayAddress operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DisassociateNatGatewayAddress for more information on using the DisassociateNatGatewayAddress
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DisassociateNatGatewayAddressRequest method.
+// req, resp := client.DisassociateNatGatewayAddressRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateNatGatewayAddress
+func (c *EC2) DisassociateNatGatewayAddressRequest(input *DisassociateNatGatewayAddressInput) (req *request.Request, output *DisassociateNatGatewayAddressOutput) {
+ op := &request.Operation{
+ Name: opDisassociateNatGatewayAddress,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisassociateNatGatewayAddressInput{}
+ }
+
+ output = &DisassociateNatGatewayAddressOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisassociateNatGatewayAddress API operation for Amazon Elastic Compute Cloud.
+//
+// Disassociates secondary Elastic IP addresses (EIPs) from a public NAT gateway.
+// You cannot disassociate your primary EIP. For more information, see Edit
+// secondary IP address associations (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-edit-secondary)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// While disassociation is in progress, you cannot associate or disassociate
+// additional EIPs; the existing connections continue to drain. You are, however,
+// allowed to delete the NAT gateway.
+//
+// An EIP will only be released at the end of MaxDrainDurationSeconds. The EIPs
+// stay associated and support the existing connections but do not support any
+// new connections (new connections are distributed across the remaining associated
+// EIPs). As the existing connections drain out, the EIPs (and the corresponding
+// private IPs mapped to them) get released.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisassociateNatGatewayAddress for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateNatGatewayAddress
+func (c *EC2) DisassociateNatGatewayAddress(input *DisassociateNatGatewayAddressInput) (*DisassociateNatGatewayAddressOutput, error) {
+ req, out := c.DisassociateNatGatewayAddressRequest(input)
+ return out, req.Send()
+}
+
+// DisassociateNatGatewayAddressWithContext is the same as DisassociateNatGatewayAddress with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisassociateNatGatewayAddress for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisassociateNatGatewayAddressWithContext(ctx aws.Context, input *DisassociateNatGatewayAddressInput, opts ...request.Option) (*DisassociateNatGatewayAddressOutput, error) {
+ req, out := c.DisassociateNatGatewayAddressRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
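+
+// Tying the drain behavior described above together, a minimal sketch
+// (assuming a constructed *EC2 client named client and the usual aws-sdk-go
+// imports; the input field names and IDs shown are assumptions for
+// illustration):
+//
+// _, err := client.DisassociateNatGatewayAddress(&ec2.DisassociateNatGatewayAddressInput{
+// NatGatewayId: aws.String("nat-0123456789abcdef0"),
+// AssociationIds: []*string{aws.String("eipassoc-0123456789abcdef0")},
+// MaxDrainDurationSeconds: aws.Int64(300), // drain existing connections for up to 5 minutes
+// })
+// if err != nil {
+// fmt.Println(err)
+// }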
+
const opDisassociateRouteTable = "DisassociateRouteTable"
// DisassociateRouteTableRequest generates a "aws/request.Request" representing the
@@ -35105,7 +37353,7 @@ func (c *EC2) EnableImageDeprecationRequest(input *EnableImageDeprecationInput)
// Enables deprecation of the specified AMI at the specified date and time.
//
// For more information, see Deprecate an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -35254,6 +37502,13 @@ func (c *EC2) EnableReachabilityAnalyzerOrganizationSharingRequest(input *Enable
// EnableReachabilityAnalyzerOrganizationSharing API operation for Amazon Elastic Compute Cloud.
//
+// Establishes a trust relationship between Reachability Analyzer and Organizations.
+// This operation must be performed by the management account for the organization.
+//
+// After you establish a trust relationship, a user in the management account
+// or a delegated administrator account can run a cross-account analysis using
+// resources from the member accounts.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -37596,6 +39851,273 @@ func (c *EC2) GetIpamAddressHistoryPagesWithContext(ctx aws.Context, input *GetI
return p.Err()
}
+const opGetIpamDiscoveredAccounts = "GetIpamDiscoveredAccounts"
+
+// GetIpamDiscoveredAccountsRequest generates a "aws/request.Request" representing the
+// client's request for the GetIpamDiscoveredAccounts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetIpamDiscoveredAccounts for more information on using the GetIpamDiscoveredAccounts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetIpamDiscoveredAccountsRequest method.
+// req, resp := client.GetIpamDiscoveredAccountsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetIpamDiscoveredAccounts
+func (c *EC2) GetIpamDiscoveredAccountsRequest(input *GetIpamDiscoveredAccountsInput) (req *request.Request, output *GetIpamDiscoveredAccountsOutput) {
+ op := &request.Operation{
+ Name: opGetIpamDiscoveredAccounts,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetIpamDiscoveredAccountsInput{}
+ }
+
+ output = &GetIpamDiscoveredAccountsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetIpamDiscoveredAccounts API operation for Amazon Elastic Compute Cloud.
+//
+// Gets IPAM discovered accounts. A discovered account is an Amazon Web Services
+// account that is monitored under a resource discovery. If you have integrated
+// IPAM with Amazon Web Services Organizations, all accounts in the organization
+// are discovered accounts. Only the IPAM account can get all discovered accounts
+// in the organization.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetIpamDiscoveredAccounts for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetIpamDiscoveredAccounts
+func (c *EC2) GetIpamDiscoveredAccounts(input *GetIpamDiscoveredAccountsInput) (*GetIpamDiscoveredAccountsOutput, error) {
+ req, out := c.GetIpamDiscoveredAccountsRequest(input)
+ return out, req.Send()
+}
+
+// GetIpamDiscoveredAccountsWithContext is the same as GetIpamDiscoveredAccounts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetIpamDiscoveredAccounts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetIpamDiscoveredAccountsWithContext(ctx aws.Context, input *GetIpamDiscoveredAccountsInput, opts ...request.Option) (*GetIpamDiscoveredAccountsOutput, error) {
+ req, out := c.GetIpamDiscoveredAccountsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// GetIpamDiscoveredAccountsPages iterates over the pages of a GetIpamDiscoveredAccounts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetIpamDiscoveredAccounts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a GetIpamDiscoveredAccounts operation.
+// pageNum := 0
+// err := client.GetIpamDiscoveredAccountsPages(params,
+// func(page *ec2.GetIpamDiscoveredAccountsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) GetIpamDiscoveredAccountsPages(input *GetIpamDiscoveredAccountsInput, fn func(*GetIpamDiscoveredAccountsOutput, bool) bool) error {
+ return c.GetIpamDiscoveredAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// GetIpamDiscoveredAccountsPagesWithContext is the same as GetIpamDiscoveredAccountsPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetIpamDiscoveredAccountsPagesWithContext(ctx aws.Context, input *GetIpamDiscoveredAccountsInput, fn func(*GetIpamDiscoveredAccountsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *GetIpamDiscoveredAccountsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.GetIpamDiscoveredAccountsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*GetIpamDiscoveredAccountsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opGetIpamDiscoveredResourceCidrs = "GetIpamDiscoveredResourceCidrs"
+
+// GetIpamDiscoveredResourceCidrsRequest generates a "aws/request.Request" representing the
+// client's request for the GetIpamDiscoveredResourceCidrs operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetIpamDiscoveredResourceCidrs for more information on using the GetIpamDiscoveredResourceCidrs
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetIpamDiscoveredResourceCidrsRequest method.
+// req, resp := client.GetIpamDiscoveredResourceCidrsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetIpamDiscoveredResourceCidrs
+func (c *EC2) GetIpamDiscoveredResourceCidrsRequest(input *GetIpamDiscoveredResourceCidrsInput) (req *request.Request, output *GetIpamDiscoveredResourceCidrsOutput) {
+ op := &request.Operation{
+ Name: opGetIpamDiscoveredResourceCidrs,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetIpamDiscoveredResourceCidrsInput{}
+ }
+
+ output = &GetIpamDiscoveredResourceCidrsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetIpamDiscoveredResourceCidrs API operation for Amazon Elastic Compute Cloud.
+//
+// Returns the resource CIDRs that are monitored as part of a resource discovery.
+// A discovered resource is a resource CIDR monitored under a resource discovery.
+// The following resources can be discovered: VPCs, Public IPv4 pools, VPC subnets,
+// and Elastic IP addresses.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetIpamDiscoveredResourceCidrs for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetIpamDiscoveredResourceCidrs
+func (c *EC2) GetIpamDiscoveredResourceCidrs(input *GetIpamDiscoveredResourceCidrsInput) (*GetIpamDiscoveredResourceCidrsOutput, error) {
+ req, out := c.GetIpamDiscoveredResourceCidrsRequest(input)
+ return out, req.Send()
+}
+
+// GetIpamDiscoveredResourceCidrsWithContext is the same as GetIpamDiscoveredResourceCidrs with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetIpamDiscoveredResourceCidrs for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetIpamDiscoveredResourceCidrsWithContext(ctx aws.Context, input *GetIpamDiscoveredResourceCidrsInput, opts ...request.Option) (*GetIpamDiscoveredResourceCidrsOutput, error) {
+ req, out := c.GetIpamDiscoveredResourceCidrsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// GetIpamDiscoveredResourceCidrsPages iterates over the pages of a GetIpamDiscoveredResourceCidrs operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetIpamDiscoveredResourceCidrs method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a GetIpamDiscoveredResourceCidrs operation.
+// pageNum := 0
+// err := client.GetIpamDiscoveredResourceCidrsPages(params,
+// func(page *ec2.GetIpamDiscoveredResourceCidrsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *EC2) GetIpamDiscoveredResourceCidrsPages(input *GetIpamDiscoveredResourceCidrsInput, fn func(*GetIpamDiscoveredResourceCidrsOutput, bool) bool) error {
+ return c.GetIpamDiscoveredResourceCidrsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// GetIpamDiscoveredResourceCidrsPagesWithContext is the same as GetIpamDiscoveredResourceCidrsPages except
+// that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetIpamDiscoveredResourceCidrsPagesWithContext(ctx aws.Context, input *GetIpamDiscoveredResourceCidrsInput, fn func(*GetIpamDiscoveredResourceCidrsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *GetIpamDiscoveredResourceCidrsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.GetIpamDiscoveredResourceCidrsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*GetIpamDiscoveredResourceCidrsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opGetIpamPoolAllocations = "GetIpamPoolAllocations"
// GetIpamPoolAllocationsRequest generates a "aws/request.Request" representing the
@@ -37647,6 +40169,11 @@ func (c *EC2) GetIpamPoolAllocationsRequest(input *GetIpamPoolAllocationsInput)
//
// Get a list of all the CIDR allocations in an IPAM pool.
//
+// If you use this action after AllocateIpamPoolCidr (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AllocateIpamPoolCidr.html)
+// or ReleaseIpamPoolAllocation (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ReleaseIpamPoolAllocation.html),
+// note that all EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency)
+// model.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -37905,7 +40432,11 @@ func (c *EC2) GetIpamResourceCidrsRequest(input *GetIpamResourceCidrsInput) (req
// GetIpamResourceCidrs API operation for Amazon Elastic Compute Cloud.
//
-// Get information about the resources in a scope.
+// Returns resource CIDRs managed by IPAM in a given scope. If an IPAM is associated
+// with more than one resource discovery, the resource CIDRs across all of the
+// resource discoveries are returned. A resource discovery is an IPAM component
+// that enables IPAM to manage and monitor resources that belong to the owning
+// account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -39785,6 +42316,152 @@ func (c *EC2) GetTransitGatewayRouteTablePropagationsPagesWithContext(ctx aws.Co
return p.Err()
}
+const opGetVerifiedAccessEndpointPolicy = "GetVerifiedAccessEndpointPolicy"
+
+// GetVerifiedAccessEndpointPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetVerifiedAccessEndpointPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetVerifiedAccessEndpointPolicy for more information on using the GetVerifiedAccessEndpointPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetVerifiedAccessEndpointPolicyRequest method.
+// req, resp := client.GetVerifiedAccessEndpointPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVerifiedAccessEndpointPolicy
+func (c *EC2) GetVerifiedAccessEndpointPolicyRequest(input *GetVerifiedAccessEndpointPolicyInput) (req *request.Request, output *GetVerifiedAccessEndpointPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetVerifiedAccessEndpointPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetVerifiedAccessEndpointPolicyInput{}
+ }
+
+ output = &GetVerifiedAccessEndpointPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetVerifiedAccessEndpointPolicy API operation for Amazon Elastic Compute Cloud.
+//
+// Get the Verified Access policy associated with the endpoint.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetVerifiedAccessEndpointPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVerifiedAccessEndpointPolicy
+func (c *EC2) GetVerifiedAccessEndpointPolicy(input *GetVerifiedAccessEndpointPolicyInput) (*GetVerifiedAccessEndpointPolicyOutput, error) {
+ req, out := c.GetVerifiedAccessEndpointPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetVerifiedAccessEndpointPolicyWithContext is the same as GetVerifiedAccessEndpointPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetVerifiedAccessEndpointPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetVerifiedAccessEndpointPolicyWithContext(ctx aws.Context, input *GetVerifiedAccessEndpointPolicyInput, opts ...request.Option) (*GetVerifiedAccessEndpointPolicyOutput, error) {
+ req, out := c.GetVerifiedAccessEndpointPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
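+
+// The WithContext variant can be combined with a deadline so the request is
+// cancelled automatically, as the note above describes. A minimal sketch
+// (assuming a constructed *EC2 client named client and the usual aws-sdk-go,
+// context, and time imports; the input field name and ID are assumptions):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// out, err := client.GetVerifiedAccessEndpointPolicyWithContext(ctx, &ec2.GetVerifiedAccessEndpointPolicyInput{
+// VerifiedAccessEndpointId: aws.String("vae-0123456789abcdef0"),
+// })
+// if err == nil {
+// fmt.Println(out)
+// }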
+
+const opGetVerifiedAccessGroupPolicy = "GetVerifiedAccessGroupPolicy"
+
+// GetVerifiedAccessGroupPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetVerifiedAccessGroupPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetVerifiedAccessGroupPolicy for more information on using the GetVerifiedAccessGroupPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetVerifiedAccessGroupPolicyRequest method.
+// req, resp := client.GetVerifiedAccessGroupPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVerifiedAccessGroupPolicy
+func (c *EC2) GetVerifiedAccessGroupPolicyRequest(input *GetVerifiedAccessGroupPolicyInput) (req *request.Request, output *GetVerifiedAccessGroupPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetVerifiedAccessGroupPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetVerifiedAccessGroupPolicyInput{}
+ }
+
+ output = &GetVerifiedAccessGroupPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetVerifiedAccessGroupPolicy API operation for Amazon Elastic Compute Cloud.
+//
+// Shows the contents of the Verified Access policy associated with the group.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetVerifiedAccessGroupPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVerifiedAccessGroupPolicy
+func (c *EC2) GetVerifiedAccessGroupPolicy(input *GetVerifiedAccessGroupPolicyInput) (*GetVerifiedAccessGroupPolicyOutput, error) {
+ req, out := c.GetVerifiedAccessGroupPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetVerifiedAccessGroupPolicyWithContext is the same as GetVerifiedAccessGroupPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetVerifiedAccessGroupPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetVerifiedAccessGroupPolicyWithContext(ctx aws.Context, input *GetVerifiedAccessGroupPolicyInput, opts ...request.Option) (*GetVerifiedAccessGroupPolicyOutput, error) {
+ req, out := c.GetVerifiedAccessGroupPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opGetVpnConnectionDeviceSampleConfiguration = "GetVpnConnectionDeviceSampleConfiguration"
// GetVpnConnectionDeviceSampleConfigurationRequest generates a "aws/request.Request" representing the
@@ -39994,6 +42671,79 @@ func (c *EC2) GetVpnConnectionDeviceTypesPagesWithContext(ctx aws.Context, input
return p.Err()
}
+const opGetVpnTunnelReplacementStatus = "GetVpnTunnelReplacementStatus"
+
+// GetVpnTunnelReplacementStatusRequest generates a "aws/request.Request" representing the
+// client's request for the GetVpnTunnelReplacementStatus operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetVpnTunnelReplacementStatus for more information on using the GetVpnTunnelReplacementStatus
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetVpnTunnelReplacementStatusRequest method.
+// req, resp := client.GetVpnTunnelReplacementStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVpnTunnelReplacementStatus
+func (c *EC2) GetVpnTunnelReplacementStatusRequest(input *GetVpnTunnelReplacementStatusInput) (req *request.Request, output *GetVpnTunnelReplacementStatusOutput) {
+ op := &request.Operation{
+ Name: opGetVpnTunnelReplacementStatus,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetVpnTunnelReplacementStatusInput{}
+ }
+
+ output = &GetVpnTunnelReplacementStatusOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetVpnTunnelReplacementStatus API operation for Amazon Elastic Compute Cloud.
+//
+// Gets details of available tunnel endpoint maintenance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetVpnTunnelReplacementStatus for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetVpnTunnelReplacementStatus
+func (c *EC2) GetVpnTunnelReplacementStatus(input *GetVpnTunnelReplacementStatusInput) (*GetVpnTunnelReplacementStatusOutput, error) {
+ req, out := c.GetVpnTunnelReplacementStatusRequest(input)
+ return out, req.Send()
+}
+
+// GetVpnTunnelReplacementStatusWithContext is the same as GetVpnTunnelReplacementStatus with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetVpnTunnelReplacementStatus for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetVpnTunnelReplacementStatusWithContext(ctx aws.Context, input *GetVpnTunnelReplacementStatusInput, opts ...request.Option) (*GetVpnTunnelReplacementStatusOutput, error) {
+ req, out := c.GetVpnTunnelReplacementStatusRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opImportClientVpnClientCertificateRevocationList = "ImportClientVpnClientCertificateRevocationList"
// ImportClientVpnClientCertificateRevocationListRequest generates a "aws/request.Request" representing the
@@ -40114,6 +42864,11 @@ func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request,
// ImportImage API operation for Amazon Elastic Compute Cloud.
//
+// To import your virtual machines (VMs) with a console-based experience, you
+// can use the Import virtual machine images to Amazon Web Services template
+// in the Migration Hub Orchestrator console (https://console.aws.amazon.com/migrationhub/orchestrator).
+// For more information, see the Migration Hub Orchestrator User Guide (https://docs.aws.amazon.com/migrationhub-orchestrator/latest/userguide/import-vm-images.html).
+//
// Import single or multi-volume disk images or EBS snapshots into an Amazon
// Machine Image (AMI).
//
@@ -40532,7 +43287,7 @@ func (c *EC2) ListImagesInRecycleBinRequest(input *ListImagesInRecycleBinInput)
//
// Lists one or more AMIs that are currently in the Recycle Bin. For more information,
// see Recycle Bin (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -41796,8 +44551,10 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req
// ModifyImageAttribute API operation for Amazon Elastic Compute Cloud.
//
// Modifies the specified attribute of the specified AMI. You can specify only
-// one attribute at a time. You can use the Attribute parameter to specify the
-// attribute or one of the following parameters: Description or LaunchPermission.
+// one attribute at a time.
+//
+// To specify the attribute, you can use the Attribute parameter, or one of
+// the following parameters: Description, ImdsSupport, or LaunchPermission.
//
// Images with an Amazon Web Services Marketplace product code cannot be made
// public.
@@ -42708,6 +45465,81 @@ func (c *EC2) ModifyIpamResourceCidrWithContext(ctx aws.Context, input *ModifyIp
return out, req.Send()
}
+const opModifyIpamResourceDiscovery = "ModifyIpamResourceDiscovery"
+
+// ModifyIpamResourceDiscoveryRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyIpamResourceDiscovery operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyIpamResourceDiscovery for more information on using the ModifyIpamResourceDiscovery
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyIpamResourceDiscoveryRequest method.
+// req, resp := client.ModifyIpamResourceDiscoveryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIpamResourceDiscovery
+func (c *EC2) ModifyIpamResourceDiscoveryRequest(input *ModifyIpamResourceDiscoveryInput) (req *request.Request, output *ModifyIpamResourceDiscoveryOutput) {
+ op := &request.Operation{
+ Name: opModifyIpamResourceDiscovery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyIpamResourceDiscoveryInput{}
+ }
+
+ output = &ModifyIpamResourceDiscoveryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyIpamResourceDiscovery API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies a resource discovery. A resource discovery is an IPAM component
+// that enables IPAM to manage and monitor resources that belong to the owning
+// account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyIpamResourceDiscovery for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIpamResourceDiscovery
+func (c *EC2) ModifyIpamResourceDiscovery(input *ModifyIpamResourceDiscoveryInput) (*ModifyIpamResourceDiscoveryOutput, error) {
+ req, out := c.ModifyIpamResourceDiscoveryRequest(input)
+ return out, req.Send()
+}
+
+// ModifyIpamResourceDiscoveryWithContext is the same as ModifyIpamResourceDiscovery with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyIpamResourceDiscovery for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyIpamResourceDiscoveryWithContext(ctx aws.Context, input *ModifyIpamResourceDiscoveryInput, opts ...request.Option) (*ModifyIpamResourceDiscoveryOutput, error) {
+ req, out := c.ModifyIpamResourceDiscoveryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opModifyIpamScope = "ModifyIpamScope"
// ModifyIpamScopeRequest generates a "aws/request.Request" representing the
@@ -44119,6 +46951,519 @@ func (c *EC2) ModifyTransitGatewayVpcAttachmentWithContext(ctx aws.Context, inpu
return out, req.Send()
}
+const opModifyVerifiedAccessEndpoint = "ModifyVerifiedAccessEndpoint"
+
+// ModifyVerifiedAccessEndpointRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessEndpoint operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessEndpoint for more information on using the ModifyVerifiedAccessEndpoint
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessEndpointRequest method.
+// req, resp := client.ModifyVerifiedAccessEndpointRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessEndpoint
+func (c *EC2) ModifyVerifiedAccessEndpointRequest(input *ModifyVerifiedAccessEndpointInput) (req *request.Request, output *ModifyVerifiedAccessEndpointOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessEndpoint,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessEndpointInput{}
+ }
+
+ output = &ModifyVerifiedAccessEndpointOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessEndpoint API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the configuration of an Amazon Web Services Verified Access endpoint.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessEndpoint for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessEndpoint
+func (c *EC2) ModifyVerifiedAccessEndpoint(input *ModifyVerifiedAccessEndpointInput) (*ModifyVerifiedAccessEndpointOutput, error) {
+ req, out := c.ModifyVerifiedAccessEndpointRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessEndpointWithContext is the same as ModifyVerifiedAccessEndpoint with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessEndpoint for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessEndpointWithContext(ctx aws.Context, input *ModifyVerifiedAccessEndpointInput, opts ...request.Option) (*ModifyVerifiedAccessEndpointOutput, error) {
+ req, out := c.ModifyVerifiedAccessEndpointRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessEndpointPolicy = "ModifyVerifiedAccessEndpointPolicy"
+
+// ModifyVerifiedAccessEndpointPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessEndpointPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessEndpointPolicy for more information on using the ModifyVerifiedAccessEndpointPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessEndpointPolicyRequest method.
+// req, resp := client.ModifyVerifiedAccessEndpointPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessEndpointPolicy
+func (c *EC2) ModifyVerifiedAccessEndpointPolicyRequest(input *ModifyVerifiedAccessEndpointPolicyInput) (req *request.Request, output *ModifyVerifiedAccessEndpointPolicyOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessEndpointPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessEndpointPolicyInput{}
+ }
+
+ output = &ModifyVerifiedAccessEndpointPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessEndpointPolicy API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the specified Verified Access endpoint policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessEndpointPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessEndpointPolicy
+func (c *EC2) ModifyVerifiedAccessEndpointPolicy(input *ModifyVerifiedAccessEndpointPolicyInput) (*ModifyVerifiedAccessEndpointPolicyOutput, error) {
+ req, out := c.ModifyVerifiedAccessEndpointPolicyRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessEndpointPolicyWithContext is the same as ModifyVerifiedAccessEndpointPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessEndpointPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessEndpointPolicyWithContext(ctx aws.Context, input *ModifyVerifiedAccessEndpointPolicyInput, opts ...request.Option) (*ModifyVerifiedAccessEndpointPolicyOutput, error) {
+ req, out := c.ModifyVerifiedAccessEndpointPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessGroup = "ModifyVerifiedAccessGroup"
+
+// ModifyVerifiedAccessGroupRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessGroup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessGroup for more information on using the ModifyVerifiedAccessGroup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessGroupRequest method.
+// req, resp := client.ModifyVerifiedAccessGroupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessGroup
+func (c *EC2) ModifyVerifiedAccessGroupRequest(input *ModifyVerifiedAccessGroupInput) (req *request.Request, output *ModifyVerifiedAccessGroupOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessGroup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessGroupInput{}
+ }
+
+ output = &ModifyVerifiedAccessGroupOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessGroup API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the specified Verified Access group configuration.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessGroup for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessGroup
+func (c *EC2) ModifyVerifiedAccessGroup(input *ModifyVerifiedAccessGroupInput) (*ModifyVerifiedAccessGroupOutput, error) {
+ req, out := c.ModifyVerifiedAccessGroupRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessGroupWithContext is the same as ModifyVerifiedAccessGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessGroupWithContext(ctx aws.Context, input *ModifyVerifiedAccessGroupInput, opts ...request.Option) (*ModifyVerifiedAccessGroupOutput, error) {
+ req, out := c.ModifyVerifiedAccessGroupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessGroupPolicy = "ModifyVerifiedAccessGroupPolicy"
+
+// ModifyVerifiedAccessGroupPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessGroupPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessGroupPolicy for more information on using the ModifyVerifiedAccessGroupPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessGroupPolicyRequest method.
+// req, resp := client.ModifyVerifiedAccessGroupPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessGroupPolicy
+func (c *EC2) ModifyVerifiedAccessGroupPolicyRequest(input *ModifyVerifiedAccessGroupPolicyInput) (req *request.Request, output *ModifyVerifiedAccessGroupPolicyOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessGroupPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessGroupPolicyInput{}
+ }
+
+ output = &ModifyVerifiedAccessGroupPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessGroupPolicy API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the specified Verified Access group policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessGroupPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessGroupPolicy
+func (c *EC2) ModifyVerifiedAccessGroupPolicy(input *ModifyVerifiedAccessGroupPolicyInput) (*ModifyVerifiedAccessGroupPolicyOutput, error) {
+ req, out := c.ModifyVerifiedAccessGroupPolicyRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessGroupPolicyWithContext is the same as ModifyVerifiedAccessGroupPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessGroupPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessGroupPolicyWithContext(ctx aws.Context, input *ModifyVerifiedAccessGroupPolicyInput, opts ...request.Option) (*ModifyVerifiedAccessGroupPolicyOutput, error) {
+ req, out := c.ModifyVerifiedAccessGroupPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessInstance = "ModifyVerifiedAccessInstance"
+
+// ModifyVerifiedAccessInstanceRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessInstance operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessInstance for more information on using the ModifyVerifiedAccessInstance
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessInstanceRequest method.
+// req, resp := client.ModifyVerifiedAccessInstanceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessInstance
+func (c *EC2) ModifyVerifiedAccessInstanceRequest(input *ModifyVerifiedAccessInstanceInput) (req *request.Request, output *ModifyVerifiedAccessInstanceOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessInstance,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessInstanceInput{}
+ }
+
+ output = &ModifyVerifiedAccessInstanceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessInstance API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the configuration of the specified Verified Access instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessInstance for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessInstance
+func (c *EC2) ModifyVerifiedAccessInstance(input *ModifyVerifiedAccessInstanceInput) (*ModifyVerifiedAccessInstanceOutput, error) {
+ req, out := c.ModifyVerifiedAccessInstanceRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessInstanceWithContext is the same as ModifyVerifiedAccessInstance with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessInstance for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessInstanceWithContext(ctx aws.Context, input *ModifyVerifiedAccessInstanceInput, opts ...request.Option) (*ModifyVerifiedAccessInstanceOutput, error) {
+ req, out := c.ModifyVerifiedAccessInstanceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessInstanceLoggingConfiguration = "ModifyVerifiedAccessInstanceLoggingConfiguration"
+
+// ModifyVerifiedAccessInstanceLoggingConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessInstanceLoggingConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessInstanceLoggingConfiguration for more information on using the ModifyVerifiedAccessInstanceLoggingConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessInstanceLoggingConfigurationRequest method.
+// req, resp := client.ModifyVerifiedAccessInstanceLoggingConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessInstanceLoggingConfiguration
+func (c *EC2) ModifyVerifiedAccessInstanceLoggingConfigurationRequest(input *ModifyVerifiedAccessInstanceLoggingConfigurationInput) (req *request.Request, output *ModifyVerifiedAccessInstanceLoggingConfigurationOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessInstanceLoggingConfiguration,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessInstanceLoggingConfigurationInput{}
+ }
+
+ output = &ModifyVerifiedAccessInstanceLoggingConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessInstanceLoggingConfiguration API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the logging configuration for the specified Amazon Web Services
+// Verified Access instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessInstanceLoggingConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessInstanceLoggingConfiguration
+func (c *EC2) ModifyVerifiedAccessInstanceLoggingConfiguration(input *ModifyVerifiedAccessInstanceLoggingConfigurationInput) (*ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error) {
+ req, out := c.ModifyVerifiedAccessInstanceLoggingConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessInstanceLoggingConfigurationWithContext is the same as ModifyVerifiedAccessInstanceLoggingConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessInstanceLoggingConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessInstanceLoggingConfigurationWithContext(ctx aws.Context, input *ModifyVerifiedAccessInstanceLoggingConfigurationInput, opts ...request.Option) (*ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error) {
+ req, out := c.ModifyVerifiedAccessInstanceLoggingConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opModifyVerifiedAccessTrustProvider = "ModifyVerifiedAccessTrustProvider"
+
+// ModifyVerifiedAccessTrustProviderRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVerifiedAccessTrustProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVerifiedAccessTrustProvider for more information on using the ModifyVerifiedAccessTrustProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ModifyVerifiedAccessTrustProviderRequest method.
+// req, resp := client.ModifyVerifiedAccessTrustProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessTrustProvider
+func (c *EC2) ModifyVerifiedAccessTrustProviderRequest(input *ModifyVerifiedAccessTrustProviderInput) (req *request.Request, output *ModifyVerifiedAccessTrustProviderOutput) {
+ op := &request.Operation{
+ Name: opModifyVerifiedAccessTrustProvider,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyVerifiedAccessTrustProviderInput{}
+ }
+
+ output = &ModifyVerifiedAccessTrustProviderOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyVerifiedAccessTrustProvider API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the configuration of the specified Amazon Web Services Verified
+// Access trust provider.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVerifiedAccessTrustProvider for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVerifiedAccessTrustProvider
+func (c *EC2) ModifyVerifiedAccessTrustProvider(input *ModifyVerifiedAccessTrustProviderInput) (*ModifyVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.ModifyVerifiedAccessTrustProviderRequest(input)
+ return out, req.Send()
+}
+
+// ModifyVerifiedAccessTrustProviderWithContext is the same as ModifyVerifiedAccessTrustProvider with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVerifiedAccessTrustProvider for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVerifiedAccessTrustProviderWithContext(ctx aws.Context, input *ModifyVerifiedAccessTrustProviderInput, opts ...request.Option) (*ModifyVerifiedAccessTrustProviderOutput, error) {
+ req, out := c.ModifyVerifiedAccessTrustProviderRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opModifyVolume = "ModifyVolume"
// ModifyVolumeRequest generates a "aws/request.Request" representing the
@@ -44721,8 +48066,8 @@ func (c *EC2) ModifyVpcEndpointServicePermissionsRequest(input *ModifyVpcEndpoin
// ModifyVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud.
//
// Modifies the permissions for your VPC endpoint service. You can add or remove
-// permissions for service consumers (IAM users, IAM roles, and Amazon Web Services
-// accounts) to connect to your endpoint service.
+// permissions for service consumers (Amazon Web Services accounts, users, and
+// IAM roles) to connect to your endpoint service.
//
// If you grant permissions to all principals, the service is public. Any users
// who know the name of a public service can send a request to attach an endpoint.
@@ -44820,14 +48165,14 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo
// If the peered VPCs are in the same Amazon Web Services account, you can enable
// DNS resolution for queries from the local VPC. This ensures that queries
// from the local VPC resolve to private IP addresses in the peer VPC. This
-// option is not available if the peered VPCs are in different different Amazon
-// Web Services accounts or different Regions. For peered VPCs in different
-// Amazon Web Services accounts, each Amazon Web Services account owner must
-// initiate a separate request to modify the peering connection options. For
-// inter-region peering connections, you must use the Region for the requester
-// VPC to modify the requester VPC peering options and the Region for the accepter
-// VPC to modify the accepter VPC peering options. To verify which VPCs are
-// the accepter and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections
+// option is not available if the peered VPCs are in different Amazon Web Services
+// accounts or different Regions. For peered VPCs in different Amazon Web Services
+// accounts, each Amazon Web Services account owner must initiate a separate
+// request to modify the peering connection options. For inter-region peering
+// connections, you must use the Region for the requester VPC to modify the
+// requester VPC peering options and the Region for the accepter VPC to modify
+// the accepter VPC peering options. To verify which VPCs are the accepter and
+// the requester for a VPC peering connection, use the DescribeVpcPeeringConnections
// command.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -45486,13 +48831,12 @@ func (c *EC2) MoveByoipCidrToIpamRequest(input *MoveByoipCidrToIpamInput) (req *
// MoveByoipCidrToIpam API operation for Amazon Elastic Compute Cloud.
//
-// Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
+// Move a BYOIPv4 CIDR to IPAM from a public IPv4 pool.
//
-// If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can
-// move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR
-// to IPAM. If you are bringing a new IP address to Amazon Web Services for
-// the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM
-// (https://docs.aws.amazon.com/vpc/latest/ipam/tutorials-byoip-ipam.html).
+// If you already have a BYOIPv4 CIDR with Amazon Web Services, you can move
+// the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to
+// IPAM. If you are bringing a new IP address to Amazon Web Services for the
+// first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM (https://docs.aws.amazon.com/vpc/latest/ipam/tutorials-byoip-ipam.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -46194,7 +49538,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
// Instance will not be applied to the On-Demand Instance. For information about
// how to obtain the platform details and billing information of an AMI, see
// Understand AMI billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html)
-// in the Amazon Elastic Compute Cloud User Guide.
+// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -46729,8 +50073,7 @@ func (c *EC2) RejectVpcEndpointConnectionsRequest(input *RejectVpcEndpointConnec
// RejectVpcEndpointConnections API operation for Amazon Elastic Compute Cloud.
//
-// Rejects one or more VPC endpoint connection requests to your VPC endpoint
-// service.
+// Rejects VPC endpoint connection requests to your VPC endpoint service.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -47068,6 +50411,9 @@ func (c *EC2) ReleaseIpamPoolAllocationRequest(input *ReleaseIpamPoolAllocationI
// For more information, see Release an allocation (https://docs.aws.amazon.com/vpc/latest/ipam/release-pool-alloc-ipam.html)
// in the Amazon VPC IPAM User Guide.
//
+// All EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency)
+// model.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -47563,6 +50909,79 @@ func (c *EC2) ReplaceTransitGatewayRouteWithContext(ctx aws.Context, input *Repl
return out, req.Send()
}
+const opReplaceVpnTunnel = "ReplaceVpnTunnel"
+
+// ReplaceVpnTunnelRequest generates a "aws/request.Request" representing the
+// client's request for the ReplaceVpnTunnel operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ReplaceVpnTunnel for more information on using the ReplaceVpnTunnel
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ReplaceVpnTunnelRequest method.
+// req, resp := client.ReplaceVpnTunnelRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceVpnTunnel
+func (c *EC2) ReplaceVpnTunnelRequest(input *ReplaceVpnTunnelInput) (req *request.Request, output *ReplaceVpnTunnelOutput) {
+ op := &request.Operation{
+ Name: opReplaceVpnTunnel,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ReplaceVpnTunnelInput{}
+ }
+
+ output = &ReplaceVpnTunnelOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ReplaceVpnTunnel API operation for Amazon Elastic Compute Cloud.
+//
+// Triggers replacement of the specified VPN tunnel.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ReplaceVpnTunnel for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceVpnTunnel
+func (c *EC2) ReplaceVpnTunnel(input *ReplaceVpnTunnelInput) (*ReplaceVpnTunnelOutput, error) {
+ req, out := c.ReplaceVpnTunnelRequest(input)
+ return out, req.Send()
+}
+
+// ReplaceVpnTunnelWithContext is the same as ReplaceVpnTunnel with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ReplaceVpnTunnel for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ReplaceVpnTunnelWithContext(ctx aws.Context, input *ReplaceVpnTunnelInput, opts ...request.Option) (*ReplaceVpnTunnelOutput, error) {
+ req, out := c.ReplaceVpnTunnelRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opReportInstanceStatus = "ReportInstanceStatus"
// ReportInstanceStatusRequest generates a "aws/request.Request" representing the
@@ -48492,7 +51911,7 @@ func (c *EC2) RestoreImageFromRecycleBinRequest(input *RestoreImageFromRecycleBi
//
// Restores an AMI from the Recycle Bin. For more information, see Recycle Bin
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html) in
-// the Amazon Elastic Compute Cloud User Guide.
+// the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -50439,6 +53858,92 @@ func (c *EC2) UnassignPrivateIpAddressesWithContext(ctx aws.Context, input *Unas
return out, req.Send()
}
+const opUnassignPrivateNatGatewayAddress = "UnassignPrivateNatGatewayAddress"
+
+// UnassignPrivateNatGatewayAddressRequest generates a "aws/request.Request" representing the
+// client's request for the UnassignPrivateNatGatewayAddress operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UnassignPrivateNatGatewayAddress for more information on using the UnassignPrivateNatGatewayAddress
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the UnassignPrivateNatGatewayAddressRequest method.
+// req, resp := client.UnassignPrivateNatGatewayAddressRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateNatGatewayAddress
+func (c *EC2) UnassignPrivateNatGatewayAddressRequest(input *UnassignPrivateNatGatewayAddressInput) (req *request.Request, output *UnassignPrivateNatGatewayAddressOutput) {
+ op := &request.Operation{
+ Name: opUnassignPrivateNatGatewayAddress,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UnassignPrivateNatGatewayAddressInput{}
+ }
+
+ output = &UnassignPrivateNatGatewayAddressOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UnassignPrivateNatGatewayAddress API operation for Amazon Elastic Compute Cloud.
+//
+// Unassigns secondary private IPv4 addresses from a private NAT gateway. You
+// cannot unassign your primary private IP. For more information, see Edit secondary
+// IP address associations (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-edit-secondary)
+// in the Amazon Virtual Private Cloud User Guide.
+//
+// While unassigning is in progress, you cannot assign or unassign additional
+// IP addresses until the connections have drained. You are, however, allowed
+// to delete the NAT gateway.
+//
+// A private IP address will only be released at the end of MaxDrainDurationSeconds.
+// The private IP addresses stay associated and support the existing connections
+// but do not support any new connections (new connections are distributed across
+// the remaining assigned private IP addresses). After the existing connections
+// drain out, the private IP addresses get released.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation UnassignPrivateNatGatewayAddress for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateNatGatewayAddress
+func (c *EC2) UnassignPrivateNatGatewayAddress(input *UnassignPrivateNatGatewayAddressInput) (*UnassignPrivateNatGatewayAddressOutput, error) {
+ req, out := c.UnassignPrivateNatGatewayAddressRequest(input)
+ return out, req.Send()
+}
+
+// UnassignPrivateNatGatewayAddressWithContext is the same as UnassignPrivateNatGatewayAddress with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UnassignPrivateNatGatewayAddress for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) UnassignPrivateNatGatewayAddressWithContext(ctx aws.Context, input *UnassignPrivateNatGatewayAddressInput, opts ...request.Option) (*UnassignPrivateNatGatewayAddressOutput, error) {
+ req, out := c.UnassignPrivateNatGatewayAddressRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
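+// A minimal usage sketch for the new operation above (illustrative only; the
+// client value svc, the IDs, and the MaxDrainDurationSeconds value are
+// assumptions rather than values taken from this change):
+//
+//	out, err := svc.UnassignPrivateNatGatewayAddress(&ec2.UnassignPrivateNatGatewayAddressInput{
+//	    NatGatewayId:            aws.String("nat-0123456789abcdef0"),
+//	    PrivateIpAddresses:      aws.StringSlice([]string{"10.0.1.5"}),
+//	    MaxDrainDurationSeconds: aws.Int64(300),
+//	})
+//	if err != nil {
+//	    return err
+//	}
+//	// The address stays associated until existing connections drain (up to
+//	// MaxDrainDurationSeconds), then it is released.
+//	fmt.Println(out)
+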
const opUnmonitorInstances = "UnmonitorInstances"
// UnmonitorInstancesRequest generates a "aws/request.Request" representing the
@@ -51418,7 +54923,7 @@ type AcceptVpcEndpointConnectionsInput struct {
// ServiceId is a required field
ServiceId *string `type:"string" required:"true"`
- // The IDs of one or more interface VPC endpoints.
+ // The IDs of the interface VPC endpoints.
//
// VpcEndpointIds is a required field
VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"`
@@ -51518,7 +55023,9 @@ type AcceptVpcPeeringConnectionInput struct {
// The ID of the VPC peering connection. You must specify this parameter in
// the request.
- VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
+ //
+ // VpcPeeringConnectionId is a required field
+ VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"`
}
// String returns the string representation.
@@ -51539,6 +55046,19 @@ func (s AcceptVpcPeeringConnectionInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AcceptVpcPeeringConnectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AcceptVpcPeeringConnectionInput"}
+ if s.VpcPeeringConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
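+// With VpcPeeringConnectionId now marked as required, the SDK's parameter
+// validation handler invokes the Validate method above before a request is
+// sent, so a missing ID fails client-side. A minimal sketch (illustrative
+// only; svc is an assumed, already-configured *ec2.EC2 client):
+//
+//	_, err := svc.AcceptVpcPeeringConnection(&ec2.AcceptVpcPeeringConnectionInput{})
+//	// err is non-nil and reports the missing required field
+//	// VpcPeeringConnectionId; no API call is made.
+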
// SetDryRun sets the DryRun field's value.
func (s *AcceptVpcPeeringConnectionInput) SetDryRun(v bool) *AcceptVpcPeeringConnectionInput {
s.DryRun = &v
@@ -52030,15 +55550,34 @@ func (s *AddedPrincipal) SetServicePermissionId(v string) *AddedPrincipal {
return s
}
-// Describes an additional detail for a path analysis.
+// Describes an additional detail for a path analysis. For more information,
+// see Reachability Analyzer additional detail codes (https://docs.aws.amazon.com/vpc/latest/reachability/additional-detail-codes.html).
type AdditionalDetail struct {
_ struct{} `type:"structure"`
- // The information type.
+ // The additional detail code.
AdditionalDetailType *string `locationName:"additionalDetailType" type:"string"`
// The path component.
Component *AnalysisComponent `locationName:"component" type:"structure"`
+
+ // The load balancers.
+ LoadBalancers []*AnalysisComponent `locationName:"loadBalancerSet" locationNameList:"item" type:"list"`
+
+ // The rule options.
+ RuleGroupRuleOptionsPairs []*RuleGroupRuleOptionsPair `locationName:"ruleGroupRuleOptionsPairSet" locationNameList:"item" type:"list"`
+
+ // The rule group type.
+ RuleGroupTypePairs []*RuleGroupTypePair `locationName:"ruleGroupTypePairSet" locationNameList:"item" type:"list"`
+
+ // The rule options.
+ RuleOptions []*RuleOption `locationName:"ruleOptionSet" locationNameList:"item" type:"list"`
+
+ // The name of the VPC endpoint service.
+ ServiceName *string `locationName:"serviceName" type:"string"`
+
+ // The VPC endpoint service.
+ VpcEndpointService *AnalysisComponent `locationName:"vpcEndpointService" type:"structure"`
}
// String returns the string representation.
@@ -52071,6 +55610,42 @@ func (s *AdditionalDetail) SetComponent(v *AnalysisComponent) *AdditionalDetail
return s
}
+// SetLoadBalancers sets the LoadBalancers field's value.
+func (s *AdditionalDetail) SetLoadBalancers(v []*AnalysisComponent) *AdditionalDetail {
+ s.LoadBalancers = v
+ return s
+}
+
+// SetRuleGroupRuleOptionsPairs sets the RuleGroupRuleOptionsPairs field's value.
+func (s *AdditionalDetail) SetRuleGroupRuleOptionsPairs(v []*RuleGroupRuleOptionsPair) *AdditionalDetail {
+ s.RuleGroupRuleOptionsPairs = v
+ return s
+}
+
+// SetRuleGroupTypePairs sets the RuleGroupTypePairs field's value.
+func (s *AdditionalDetail) SetRuleGroupTypePairs(v []*RuleGroupTypePair) *AdditionalDetail {
+ s.RuleGroupTypePairs = v
+ return s
+}
+
+// SetRuleOptions sets the RuleOptions field's value.
+func (s *AdditionalDetail) SetRuleOptions(v []*RuleOption) *AdditionalDetail {
+ s.RuleOptions = v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *AdditionalDetail) SetServiceName(v string) *AdditionalDetail {
+ s.ServiceName = &v
+ return s
+}
+
+// SetVpcEndpointService sets the VpcEndpointService field's value.
+func (s *AdditionalDetail) SetVpcEndpointService(v *AnalysisComponent) *AdditionalDetail {
+ s.VpcEndpointService = v
+ return s
+}
+
// Describes an Elastic IP address, or a carrier IP address.
type Address struct {
_ struct{} `type:"structure"`
@@ -52681,6 +56256,11 @@ type AllocateHostsInput struct {
// of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `locationName:"clientToken" type:"string"`
+ // Indicates whether to enable or disable host maintenance for the Dedicated
+ // Host. For more information, see Host maintenance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-maintenance.html)
+ // in the Amazon EC2 User Guide.
+ HostMaintenance *string `type:"string" enum:"HostMaintenance"`
+
// Indicates whether to enable or disable host recovery for the Dedicated Host.
// Host recovery is disabled by default. For more information, see Host recovery
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html)
@@ -52772,6 +56352,12 @@ func (s *AllocateHostsInput) SetClientToken(v string) *AllocateHostsInput {
return s
}
+// SetHostMaintenance sets the HostMaintenance field's value.
+func (s *AllocateHostsInput) SetHostMaintenance(v string) *AllocateHostsInput {
+ s.HostMaintenance = &v
+ return s
+}
+
// SetHostRecovery sets the HostRecovery field's value.
func (s *AllocateHostsInput) SetHostRecovery(v string) *AllocateHostsInput {
s.HostRecovery = &v
@@ -53416,6 +57002,12 @@ func (s *AnalysisPacketHeader) SetSourcePortRanges(v []*PortRange) *AnalysisPack
type AnalysisRouteTableRoute struct {
_ struct{} `type:"structure"`
+ // The ID of a carrier gateway.
+ CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"`
+
+ // The Amazon Resource Name (ARN) of a core network.
+ CoreNetworkArn *string `locationName:"coreNetworkArn" min:"1" type:"string"`
+
// The destination IPv4 address, in CIDR notation.
DestinationCidr *string `locationName:"destinationCidr" type:"string"`
@@ -53431,6 +57023,9 @@ type AnalysisRouteTableRoute struct {
// The ID of the instance, such as a NAT instance.
InstanceId *string `locationName:"instanceId" type:"string"`
+ // The ID of a local gateway.
+ LocalGatewayId *string `locationName:"localGatewayId" type:"string"`
+
// The ID of a NAT gateway.
NatGatewayId *string `locationName:"natGatewayId" type:"string"`
@@ -53479,6 +57074,18 @@ func (s AnalysisRouteTableRoute) GoString() string {
return s.String()
}
+// SetCarrierGatewayId sets the CarrierGatewayId field's value.
+func (s *AnalysisRouteTableRoute) SetCarrierGatewayId(v string) *AnalysisRouteTableRoute {
+ s.CarrierGatewayId = &v
+ return s
+}
+
+// SetCoreNetworkArn sets the CoreNetworkArn field's value.
+func (s *AnalysisRouteTableRoute) SetCoreNetworkArn(v string) *AnalysisRouteTableRoute {
+ s.CoreNetworkArn = &v
+ return s
+}
+
// SetDestinationCidr sets the DestinationCidr field's value.
func (s *AnalysisRouteTableRoute) SetDestinationCidr(v string) *AnalysisRouteTableRoute {
s.DestinationCidr = &v
@@ -53509,6 +57116,12 @@ func (s *AnalysisRouteTableRoute) SetInstanceId(v string) *AnalysisRouteTableRou
return s
}
+// SetLocalGatewayId sets the LocalGatewayId field's value.
+func (s *AnalysisRouteTableRoute) SetLocalGatewayId(v string) *AnalysisRouteTableRoute {
+ s.LocalGatewayId = &v
+ return s
+}
+
// SetNatGatewayId sets the NatGatewayId field's value.
func (s *AnalysisRouteTableRoute) SetNatGatewayId(v string) *AnalysisRouteTableRoute {
s.NatGatewayId = &v
@@ -54035,6 +57648,126 @@ func (s *AssignPrivateIpAddressesOutput) SetNetworkInterfaceId(v string) *Assign
return s
}
+type AssignPrivateNatGatewayAddressInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The NAT gateway ID.
+ //
+ // NatGatewayId is a required field
+ NatGatewayId *string `type:"string" required:"true"`
+
+ // The number of private IP addresses to assign to the NAT gateway. You can't
+ // specify this parameter when also specifying private IP addresses.
+ PrivateIpAddressCount *int64 `min:"1" type:"integer"`
+
+ // The private IPv4 addresses you want to assign to the private NAT gateway.
+ PrivateIpAddresses []*string `locationName:"PrivateIpAddress" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssignPrivateNatGatewayAddressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssignPrivateNatGatewayAddressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssignPrivateNatGatewayAddressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssignPrivateNatGatewayAddressInput"}
+ if s.NatGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("NatGatewayId"))
+ }
+ if s.PrivateIpAddressCount != nil && *s.PrivateIpAddressCount < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("PrivateIpAddressCount", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AssignPrivateNatGatewayAddressInput) SetDryRun(v bool) *AssignPrivateNatGatewayAddressInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *AssignPrivateNatGatewayAddressInput) SetNatGatewayId(v string) *AssignPrivateNatGatewayAddressInput {
+ s.NatGatewayId = &v
+ return s
+}
+
+// SetPrivateIpAddressCount sets the PrivateIpAddressCount field's value.
+func (s *AssignPrivateNatGatewayAddressInput) SetPrivateIpAddressCount(v int64) *AssignPrivateNatGatewayAddressInput {
+ s.PrivateIpAddressCount = &v
+ return s
+}
+
+// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
+func (s *AssignPrivateNatGatewayAddressInput) SetPrivateIpAddresses(v []*string) *AssignPrivateNatGatewayAddressInput {
+ s.PrivateIpAddresses = v
+ return s
+}
+
+type AssignPrivateNatGatewayAddressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // NAT gateway IP addresses.
+ NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
+
+ // The NAT gateway ID.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssignPrivateNatGatewayAddressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssignPrivateNatGatewayAddressOutput) GoString() string {
+ return s.String()
+}
+
+// SetNatGatewayAddresses sets the NatGatewayAddresses field's value.
+func (s *AssignPrivateNatGatewayAddressOutput) SetNatGatewayAddresses(v []*NatGatewayAddress) *AssignPrivateNatGatewayAddressOutput {
+ s.NatGatewayAddresses = v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *AssignPrivateNatGatewayAddressOutput) SetNatGatewayId(v string) *AssignPrivateNatGatewayAddressOutput {
+ s.NatGatewayId = &v
+ return s
+}
+
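The new AssignPrivateNatGatewayAddress types follow the SDK's usual builder pattern. A minimal usage sketch, assuming the vendored package is imported as ec2 and using a placeholder NAT gateway ID:

    input := new(ec2.AssignPrivateNatGatewayAddressInput).
        SetNatGatewayId("nat-0123456789abcdef0"). // required field
        SetPrivateIpAddressCount(2)               // can't be combined with explicit PrivateIpAddresses
    if err := input.Validate(); err != nil {
        // Validate reports a missing NatGatewayId or a count below the minimum of 1.
    }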
// Describes the private IP addresses assigned to a network interface.
type AssignedPrivateIpAddress struct {
_ struct{} `type:"structure"`
@@ -54423,7 +58156,9 @@ type AssociateEnclaveCertificateIamRoleInput struct {
_ struct{} `type:"structure"`
// The ARN of the ACM certificate with which to associate the IAM role.
- CertificateArn *string `min:"1" type:"string"`
+ //
+ // CertificateArn is a required field
+ CertificateArn *string `type:"string" required:"true"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -54433,7 +58168,9 @@ type AssociateEnclaveCertificateIamRoleInput struct {
// The ARN of the IAM role to associate with the ACM certificate. You can associate
// up to 16 IAM roles with an ACM certificate.
- RoleArn *string `min:"1" type:"string"`
+ //
+ // RoleArn is a required field
+ RoleArn *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -54457,11 +58194,11 @@ func (s AssociateEnclaveCertificateIamRoleInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *AssociateEnclaveCertificateIamRoleInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "AssociateEnclaveCertificateIamRoleInput"}
- if s.CertificateArn != nil && len(*s.CertificateArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1))
+ if s.CertificateArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateArn"))
}
- if s.RoleArn != nil && len(*s.RoleArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
}
if invalidParams.Len() > 0 {
@@ -54732,6 +58469,249 @@ func (s *AssociateInstanceEventWindowOutput) SetInstanceEventWindow(v *InstanceE
return s
}
+type AssociateIpamResourceDiscoveryInput struct {
+ _ struct{} `type:"structure"`
+
+ // A client token.
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // An IPAM ID.
+ //
+ // IpamId is a required field
+ IpamId *string `type:"string" required:"true"`
+
+ // A resource discovery ID.
+ //
+ // IpamResourceDiscoveryId is a required field
+ IpamResourceDiscoveryId *string `type:"string" required:"true"`
+
+ // Tag specifications.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateIpamResourceDiscoveryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateIpamResourceDiscoveryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateIpamResourceDiscoveryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateIpamResourceDiscoveryInput"}
+ if s.IpamId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamId"))
+ }
+ if s.IpamResourceDiscoveryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *AssociateIpamResourceDiscoveryInput) SetClientToken(v string) *AssociateIpamResourceDiscoveryInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AssociateIpamResourceDiscoveryInput) SetDryRun(v bool) *AssociateIpamResourceDiscoveryInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetIpamId sets the IpamId field's value.
+func (s *AssociateIpamResourceDiscoveryInput) SetIpamId(v string) *AssociateIpamResourceDiscoveryInput {
+ s.IpamId = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *AssociateIpamResourceDiscoveryInput) SetIpamResourceDiscoveryId(v string) *AssociateIpamResourceDiscoveryInput {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *AssociateIpamResourceDiscoveryInput) SetTagSpecifications(v []*TagSpecification) *AssociateIpamResourceDiscoveryInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type AssociateIpamResourceDiscoveryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A resource discovery association. An associated resource discovery is a resource
+ // discovery that has been associated with an IPAM.
+ IpamResourceDiscoveryAssociation *IpamResourceDiscoveryAssociation `locationName:"ipamResourceDiscoveryAssociation" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateIpamResourceDiscoveryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateIpamResourceDiscoveryOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscoveryAssociation sets the IpamResourceDiscoveryAssociation field's value.
+func (s *AssociateIpamResourceDiscoveryOutput) SetIpamResourceDiscoveryAssociation(v *IpamResourceDiscoveryAssociation) *AssociateIpamResourceDiscoveryOutput {
+ s.IpamResourceDiscoveryAssociation = v
+ return s
+}
+
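A short sketch of associating a resource discovery with an IPAM through the new input type; both IDs below are placeholders and the ec2 package alias is an assumption:

    req := new(ec2.AssociateIpamResourceDiscoveryInput).
        SetIpamId("ipam-0example").
        SetIpamResourceDiscoveryId("ipam-res-disco-0example")
    if err := req.Validate(); err != nil {
        // Both IDs are required, so Validate flags whichever one is missing.
    }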
+type AssociateNatGatewayAddressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The allocation IDs of EIPs that you want to associate with your NAT gateway.
+ //
+ // AllocationIds is a required field
+ AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The NAT gateway ID.
+ //
+ // NatGatewayId is a required field
+ NatGatewayId *string `type:"string" required:"true"`
+
+ // The private IPv4 addresses that you want to assign to the NAT gateway.
+ PrivateIpAddresses []*string `locationName:"PrivateIpAddress" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateNatGatewayAddressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateNatGatewayAddressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateNatGatewayAddressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateNatGatewayAddressInput"}
+ if s.AllocationIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllocationIds"))
+ }
+ if s.NatGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("NatGatewayId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllocationIds sets the AllocationIds field's value.
+func (s *AssociateNatGatewayAddressInput) SetAllocationIds(v []*string) *AssociateNatGatewayAddressInput {
+ s.AllocationIds = v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AssociateNatGatewayAddressInput) SetDryRun(v bool) *AssociateNatGatewayAddressInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *AssociateNatGatewayAddressInput) SetNatGatewayId(v string) *AssociateNatGatewayAddressInput {
+ s.NatGatewayId = &v
+ return s
+}
+
+// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
+func (s *AssociateNatGatewayAddressInput) SetPrivateIpAddresses(v []*string) *AssociateNatGatewayAddressInput {
+ s.PrivateIpAddresses = v
+ return s
+}
+
+type AssociateNatGatewayAddressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The IP addresses.
+ NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
+
+ // The NAT gateway ID.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateNatGatewayAddressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AssociateNatGatewayAddressOutput) GoString() string {
+ return s.String()
+}
+
+// SetNatGatewayAddresses sets the NatGatewayAddresses field's value.
+func (s *AssociateNatGatewayAddressOutput) SetNatGatewayAddresses(v []*NatGatewayAddress) *AssociateNatGatewayAddressOutput {
+ s.NatGatewayAddresses = v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *AssociateNatGatewayAddressOutput) SetNatGatewayId(v string) *AssociateNatGatewayAddressOutput {
+ s.NatGatewayId = &v
+ return s
+}
+
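The companion AssociateNatGatewayAddress call attaches additional Elastic IP allocations to an existing public NAT gateway. A hedged sketch with placeholder IDs, using aws.String from the vendored aws package:

    req := new(ec2.AssociateNatGatewayAddressInput).
        SetNatGatewayId("nat-0123456789abcdef0").
        SetAllocationIds([]*string{aws.String("eipalloc-0example")})
    if err := req.Validate(); err != nil {
        // AllocationIds and NatGatewayId are both required.
    }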
type AssociateRouteTableInput struct {
_ struct{} `type:"structure"`
@@ -54959,14 +58939,20 @@ type AssociateTransitGatewayMulticastDomainInput struct {
DryRun *bool `type:"boolean"`
// The IDs of the subnets to associate with the transit gateway multicast domain.
- SubnetIds []*string `locationNameList:"item" type:"list"`
+ //
+ // SubnetIds is a required field
+ SubnetIds []*string `locationNameList:"item" type:"list" required:"true"`
// The ID of the transit gateway attachment to associate with the transit gateway
// multicast domain.
- TransitGatewayAttachmentId *string `type:"string"`
+ //
+ // TransitGatewayAttachmentId is a required field
+ TransitGatewayAttachmentId *string `type:"string" required:"true"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -54987,6 +58973,25 @@ func (s AssociateTransitGatewayMulticastDomainInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateTransitGatewayMulticastDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateTransitGatewayMulticastDomainInput"}
+ if s.SubnetIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+ }
+ if s.TransitGatewayAttachmentId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId"))
+ }
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
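With this new Validate method, the three parameters that used to be optional must now all be set before the request is sent. A sketch with placeholder IDs (the setters for these pre-existing fields are not shown in this hunk but follow the generated pattern):

    req := new(ec2.AssociateTransitGatewayMulticastDomainInput).
        SetTransitGatewayMulticastDomainId("tgw-mcast-domain-0example").
        SetTransitGatewayAttachmentId("tgw-attach-0example").
        SetSubnetIds([]*string{aws.String("subnet-0example")})
    // req.Validate() now returns an error if any of the three is left unset.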
// SetDryRun sets the DryRun field's value.
func (s *AssociateTransitGatewayMulticastDomainInput) SetDryRun(v bool) *AssociateTransitGatewayMulticastDomainInput {
s.DryRun = &v
@@ -56172,6 +60177,129 @@ func (s *AttachNetworkInterfaceOutput) SetNetworkCardIndex(v int64) *AttachNetwo
return s
}
+type AttachVerifiedAccessTrustProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ //
+ // VerifiedAccessTrustProviderId is a required field
+ VerifiedAccessTrustProviderId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachVerifiedAccessTrustProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachVerifiedAccessTrustProviderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttachVerifiedAccessTrustProviderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttachVerifiedAccessTrustProviderInput"}
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+ if s.VerifiedAccessTrustProviderId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessTrustProviderId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *AttachVerifiedAccessTrustProviderInput) SetClientToken(v string) *AttachVerifiedAccessTrustProviderInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *AttachVerifiedAccessTrustProviderInput) SetDryRun(v bool) *AttachVerifiedAccessTrustProviderInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *AttachVerifiedAccessTrustProviderInput) SetVerifiedAccessInstanceId(v string) *AttachVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *AttachVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderId(v string) *AttachVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
+type AttachVerifiedAccessTrustProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachVerifiedAccessTrustProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AttachVerifiedAccessTrustProviderOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessInstance sets the VerifiedAccessInstance field's value.
+func (s *AttachVerifiedAccessTrustProviderOutput) SetVerifiedAccessInstance(v *VerifiedAccessInstance) *AttachVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessInstance = v
+ return s
+}
+
+// SetVerifiedAccessTrustProvider sets the VerifiedAccessTrustProvider field's value.
+func (s *AttachVerifiedAccessTrustProviderOutput) SetVerifiedAccessTrustProvider(v *VerifiedAccessTrustProvider) *AttachVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessTrustProvider = v
+ return s
+}
+
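A brief sketch of attaching a Verified Access trust provider to an instance with the new input type; both IDs are placeholders:

    req := new(ec2.AttachVerifiedAccessTrustProviderInput).
        SetVerifiedAccessInstanceId("vai-0example").
        SetVerifiedAccessTrustProviderId("vatp-0example")
    if err := req.Validate(); err != nil {
        // Both the instance ID and the trust provider ID are required.
    }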
type AttachVolumeInput struct {
_ struct{} `type:"structure"`
@@ -56920,9 +61048,9 @@ type AuthorizeSecurityGroupIngressInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The start of port range for the TCP and UDP protocols, or an ICMP type number.
- // For the ICMP type number, use -1 to specify all types. If you specify all
- // ICMP types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the start of the port range. If the
+ // protocol is ICMP, this is the type number. A value of -1 indicates all ICMP
+ // types. If you specify all ICMP types, you must specify all ICMP codes.
//
// Alternatively, use a set of IP permissions to specify multiple rules and
// a description for the rule.
@@ -56974,9 +61102,9 @@ type AuthorizeSecurityGroupIngressInput struct {
// [VPC Only] The tags applied to the security group rule.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
- // The end of port range for the TCP and UDP protocols, or an ICMP code number.
- // For the ICMP code number, use -1 to specify all codes. If you specify all
- // ICMP types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the end of the port range. If the
+ // protocol is ICMP, this is the code. A value of -1 indicates all ICMP codes.
+ // If you specify all ICMP types, you must specify all ICMP codes.
//
// Alternatively, use a set of IP permissions to specify multiple rules and
// a description for the rule.
@@ -58668,8 +62796,11 @@ type CancelSpotFleetRequestsInput struct {
// SpotFleetRequestIds is a required field
SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"`
- // Indicates whether to terminate instances for a Spot Fleet request if it is
- // canceled successfully.
+ // Indicates whether to terminate the associated instances when the Spot Fleet
+ // request is canceled. The default is to terminate the instances.
+ //
+ // To let the instances continue to run after the Spot Fleet request is canceled,
+ // specify no-terminate-instances.
//
// TerminateInstances is a required field
TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"`
@@ -62231,7 +66362,7 @@ type CopyImageInput struct {
//
// For more information, see Copy AMIs from an Amazon Web Services Region to
// an Outpost (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-amis)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
DestinationOutpostArn *string `type:"string"`
// Checks whether you have the required permissions for the action, without
@@ -62246,7 +66377,7 @@ type CopyImageInput struct {
// for Amazon EBS is used unless you specify a non-default Key Management Service
// (KMS) KMS key using KmsKeyId. For more information, see Amazon EBS encryption
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
// The identifier of the symmetric Key Management Service (KMS) KMS key to use
@@ -62492,7 +66623,11 @@ type CopySnapshotInput struct {
// in the Amazon Simple Storage Service API Reference. An invalid or improperly
// signed PresignedUrl will cause the copy operation to fail asynchronously,
// and the snapshot will move to an error state.
- PresignedUrl *string `locationName:"presignedUrl" type:"string"`
+ //
+ // PresignedUrl is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CopySnapshotInput's
+ // String and GoString methods.
+ PresignedUrl *string `locationName:"presignedUrl" type:"string" sensitive:"true"`
// The ID of the Region that contains the snapshot to be copied.
//
@@ -63417,7 +67552,8 @@ type CreateClientVpnEndpointInput struct {
// addresses. The address range cannot overlap with the local CIDR of the VPC
// in which the associated subnet is located, or the routes that you add manually.
// The address range cannot be changed after the Client VPN endpoint has been
- // created. The CIDR block should be /22 or greater.
+ // created. Client CIDR range must have a size of at least /22 and must not
+ // be greater than /12.
//
// ClientCidrBlock is a required field
ClientCidrBlock *string `type:"string" required:"true"`
@@ -64068,9 +68204,7 @@ type CreateCustomerGatewayInput struct {
// For devices that support BGP, the customer gateway's BGP ASN.
//
// Default: 65000
- //
- // BgpAsn is a required field
- BgpAsn *int64 `type:"integer" required:"true"`
+ BgpAsn *int64 `type:"integer"`
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn *string `type:"string"`
@@ -64124,9 +68258,6 @@ func (s CreateCustomerGatewayInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCustomerGatewayInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateCustomerGatewayInput"}
- if s.BgpAsn == nil {
- invalidParams.Add(request.NewErrParamRequired("BgpAsn"))
- }
if s.Type == nil {
invalidParams.Add(request.NewErrParamRequired("Type"))
}
@@ -64681,6 +68812,8 @@ type CreateFleetInput struct {
// Indicates whether running instances should be terminated if the total target
// capacity of the EC2 Fleet is decreased below the current size of the EC2
// Fleet.
+ //
+ // Supported only for fleets of type maintain.
ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"`
// The configuration for the EC2 Fleet.
@@ -66097,6 +70230,15 @@ type CreateIpamPoolInput struct {
// Possible values: Any Amazon Web Services Region, such as us-east-1.
Locale *string `type:"string"`
+ // The IP address source for pools in the public scope. Only used for provisioning
+ // IP address CIDRs to pools in the public scope. Default is byoip. For more
+ // information, see Create IPv6 pools (https://docs.aws.amazon.com/vpc/latest/ipam/intro-create-ipv6-pools.html)
+ // in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided
+ // IPv6 CIDR block to a top-level IPv6 pool if PublicIpSource is amazon. For
+ // information on increasing the default limit, see Quotas for your IPAM (https://docs.aws.amazon.com/vpc/latest/ipam/quotas-ipam.html)
+ // in the Amazon VPC IPAM User Guide.
+ PublicIpSource *string `type:"string" enum:"IpamPoolPublicIpSource"`
+
// Determines if the pool is publicly advertisable. This option is not available
// for pools with AddressFamily set to ipv4.
PubliclyAdvertisable *bool `type:"boolean"`
@@ -66219,6 +70361,12 @@ func (s *CreateIpamPoolInput) SetLocale(v string) *CreateIpamPoolInput {
return s
}
+// SetPublicIpSource sets the PublicIpSource field's value.
+func (s *CreateIpamPoolInput) SetPublicIpSource(v string) *CreateIpamPoolInput {
+ s.PublicIpSource = &v
+ return s
+}
+
// SetPubliclyAdvertisable sets the PubliclyAdvertisable field's value.
func (s *CreateIpamPoolInput) SetPubliclyAdvertisable(v bool) *CreateIpamPoolInput {
s.PubliclyAdvertisable = &v
@@ -66268,6 +70416,110 @@ func (s *CreateIpamPoolOutput) SetIpamPool(v *IpamPool) *CreateIpamPoolOutput {
return s
}
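The new PublicIpSource field selects where public-scope pools get their CIDRs; the doc comment above names the two values, byoip (the default) and amazon. A sketch with a placeholder scope ID (SetIpamScopeId and SetAddressFamily are pre-existing setters not shown in this hunk):

    pool := new(ec2.CreateIpamPoolInput).
        SetIpamScopeId("ipam-scope-0example").
        SetAddressFamily("ipv6").
        SetPublicIpSource("amazon") // default would be byoip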
+type CreateIpamResourceDiscoveryInput struct {
+ _ struct{} `type:"structure"`
+
+ // A client token for the IPAM resource discovery.
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the IPAM resource discovery.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // Operating Regions for the IPAM resource discovery. Operating Regions are
+ // Amazon Web Services Regions where the IPAM is allowed to manage IP address
+ // CIDRs. IPAM only discovers and monitors resources in the Amazon Web Services
+ // Regions you select as operating Regions.
+ OperatingRegions []*AddIpamOperatingRegion `locationName:"OperatingRegion" type:"list"`
+
+ // Tag specifications for the IPAM resource discovery.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateIpamResourceDiscoveryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateIpamResourceDiscoveryInput) GoString() string {
+ return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateIpamResourceDiscoveryInput) SetClientToken(v string) *CreateIpamResourceDiscoveryInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateIpamResourceDiscoveryInput) SetDescription(v string) *CreateIpamResourceDiscoveryInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateIpamResourceDiscoveryInput) SetDryRun(v bool) *CreateIpamResourceDiscoveryInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetOperatingRegions sets the OperatingRegions field's value.
+func (s *CreateIpamResourceDiscoveryInput) SetOperatingRegions(v []*AddIpamOperatingRegion) *CreateIpamResourceDiscoveryInput {
+ s.OperatingRegions = v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateIpamResourceDiscoveryInput) SetTagSpecifications(v []*TagSpecification) *CreateIpamResourceDiscoveryInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type CreateIpamResourceDiscoveryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An IPAM resource discovery.
+ IpamResourceDiscovery *IpamResourceDiscovery `locationName:"ipamResourceDiscovery" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateIpamResourceDiscoveryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateIpamResourceDiscoveryOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscovery sets the IpamResourceDiscovery field's value.
+func (s *CreateIpamResourceDiscoveryOutput) SetIpamResourceDiscovery(v *IpamResourceDiscovery) *CreateIpamResourceDiscoveryOutput {
+ s.IpamResourceDiscovery = v
+ return s
+}
+
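A minimal sketch of creating a standalone IPAM resource discovery limited to one operating Region; the Region name is a placeholder and AddIpamOperatingRegion's SetRegionName is a pre-existing setter not shown in this hunk:

    disco := new(ec2.CreateIpamResourceDiscoveryInput).
        SetDescription("example discovery").
        SetOperatingRegions([]*ec2.AddIpamOperatingRegion{
            new(ec2.AddIpamOperatingRegion).SetRegionName("us-east-1"),
        })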
type CreateIpamScopeInput struct {
_ struct{} `type:"structure"`
@@ -66754,6 +71006,14 @@ type CreateLaunchTemplateVersionInput struct {
// both.
LaunchTemplateName *string `min:"3" type:"string"`
+ // If true, and if a Systems Manager parameter is specified for ImageId, the
+ // AMI ID is displayed in the response for imageID. For more information, see
+ // Use a Systems Manager parameter instead of an AMI ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Default: false
+ ResolveAlias *bool `type:"boolean"`
+
// The version number of the launch template version on which to base the new
// version. The new version inherits the same launch parameters as the source
// version, except for parameters that you specify in LaunchTemplateData. Snapshots
@@ -66834,6 +71094,12 @@ func (s *CreateLaunchTemplateVersionInput) SetLaunchTemplateName(v string) *Crea
return s
}
+// SetResolveAlias sets the ResolveAlias field's value.
+func (s *CreateLaunchTemplateVersionInput) SetResolveAlias(v bool) *CreateLaunchTemplateVersionInput {
+ s.ResolveAlias = &v
+ return s
+}
+
// SetSourceVersion sets the SourceVersion field's value.
func (s *CreateLaunchTemplateVersionInput) SetSourceVersion(v string) *CreateLaunchTemplateVersionInput {
s.SourceVersion = &v
@@ -66893,9 +71159,12 @@ type CreateLocalGatewayRouteInput struct {
// The CIDR range used for destination matches. Routing decisions are based
// on the most specific match.
- //
- // DestinationCidrBlock is a required field
- DestinationCidrBlock *string `type:"string" required:"true"`
+ DestinationCidrBlock *string `type:"string"`
+
+ // The ID of the prefix list. Use a prefix list in place of DestinationCidrBlock.
+ // You cannot use DestinationPrefixListId and DestinationCidrBlock in the same
+ // request.
+ DestinationPrefixListId *string `type:"string"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -66936,9 +71205,6 @@ func (s CreateLocalGatewayRouteInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateLocalGatewayRouteInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateLocalGatewayRouteInput"}
- if s.DestinationCidrBlock == nil {
- invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
- }
if s.LocalGatewayRouteTableId == nil {
invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId"))
}
@@ -66955,6 +71221,12 @@ func (s *CreateLocalGatewayRouteInput) SetDestinationCidrBlock(v string) *Create
return s
}
+// SetDestinationPrefixListId sets the DestinationPrefixListId field's value.
+func (s *CreateLocalGatewayRouteInput) SetDestinationPrefixListId(v string) *CreateLocalGatewayRouteInput {
+ s.DestinationPrefixListId = &v
+ return s
+}
+
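With this change the destination can be given either as a CIDR block or, as sketched below, as a managed prefix list ID; per the comment above the two are mutually exclusive. IDs are placeholders:

    route := new(ec2.CreateLocalGatewayRouteInput).
        SetLocalGatewayRouteTableId("lgw-rtb-0example").
        SetDestinationPrefixListId("pl-0example") // used instead of SetDestinationCidrBlock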
// SetDryRun sets the DryRun field's value.
func (s *CreateLocalGatewayRouteInput) SetDryRun(v bool) *CreateLocalGatewayRouteInput {
s.DryRun = &v
@@ -67532,7 +71804,23 @@ type CreateNatGatewayInput struct {
// an address, a private IPv4 address will be automatically assigned.
PrivateIpAddress *string `type:"string"`
- // The subnet in which to create the NAT gateway.
+ // Secondary EIP allocation IDs. For more information about secondary addresses,
+ // see Create a NAT gateway (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating)
+ // in the Amazon Virtual Private Cloud User Guide.
+ SecondaryAllocationIds []*string `locationName:"SecondaryAllocationId" locationNameList:"AllocationId" type:"list"`
+
+ // [Private NAT gateway only] The number of secondary private IPv4 addresses
+ // you want to assign to the NAT gateway. For more information about secondary
+ // addresses, see Create a NAT gateway (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating)
+ // in the Amazon Virtual Private Cloud User Guide.
+ SecondaryPrivateIpAddressCount *int64 `min:"1" type:"integer"`
+
+ // Secondary private IPv4 addresses. For more information about secondary addresses,
+ // see Create a NAT gateway (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating)
+ // in the Amazon Virtual Private Cloud User Guide.
+ SecondaryPrivateIpAddresses []*string `locationName:"SecondaryPrivateIpAddress" locationNameList:"item" type:"list"`
+
+ // The ID of the subnet in which to create the NAT gateway.
//
// SubnetId is a required field
SubnetId *string `type:"string" required:"true"`
@@ -67562,6 +71850,9 @@ func (s CreateNatGatewayInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateNatGatewayInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateNatGatewayInput"}
+ if s.SecondaryPrivateIpAddressCount != nil && *s.SecondaryPrivateIpAddressCount < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("SecondaryPrivateIpAddressCount", 1))
+ }
if s.SubnetId == nil {
invalidParams.Add(request.NewErrParamRequired("SubnetId"))
}
@@ -67602,6 +71893,24 @@ func (s *CreateNatGatewayInput) SetPrivateIpAddress(v string) *CreateNatGatewayI
return s
}
+// SetSecondaryAllocationIds sets the SecondaryAllocationIds field's value.
+func (s *CreateNatGatewayInput) SetSecondaryAllocationIds(v []*string) *CreateNatGatewayInput {
+ s.SecondaryAllocationIds = v
+ return s
+}
+
+// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value.
+func (s *CreateNatGatewayInput) SetSecondaryPrivateIpAddressCount(v int64) *CreateNatGatewayInput {
+ s.SecondaryPrivateIpAddressCount = &v
+ return s
+}
+
+// SetSecondaryPrivateIpAddresses sets the SecondaryPrivateIpAddresses field's value.
+func (s *CreateNatGatewayInput) SetSecondaryPrivateIpAddresses(v []*string) *CreateNatGatewayInput {
+ s.SecondaryPrivateIpAddresses = v
+ return s
+}
+
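A sketch of the new secondary-address options when creating a public NAT gateway: one primary allocation plus one secondary allocation ID (all IDs are placeholders, and SetAllocationId is a pre-existing setter not shown in this hunk):

    gw := new(ec2.CreateNatGatewayInput).
        SetSubnetId("subnet-0example").
        SetAllocationId("eipalloc-0primary").
        SetSecondaryAllocationIds([]*string{aws.String("eipalloc-0secondary")})
    // For a private NAT gateway, SetSecondaryPrivateIpAddressCount or
    // SetSecondaryPrivateIpAddresses would be used instead.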
// SetSubnetId sets the SubnetId field's value.
func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput {
s.SubnetId = &v
@@ -68058,13 +72367,11 @@ type CreateNetworkInsightsPathInput struct {
// of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string" idempotencyToken:"true"`
- // The Amazon Web Services resource that is the destination of the path.
- //
- // Destination is a required field
- Destination *string `type:"string" required:"true"`
+ // The ID or ARN of the destination. If the resource is in another account,
+ // you must specify an ARN.
+ Destination *string `type:"string"`
- // The IP address of the Amazon Web Services resource that is the destination
- // of the path.
+ // The IP address of the destination.
DestinationIp *string `type:"string"`
// The destination port.
@@ -68076,18 +72383,28 @@ type CreateNetworkInsightsPathInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
+ // Scopes the analysis to network paths that match specific filters at the destination.
+ // If you specify this parameter, you can't specify the parameter for the destination
+ // IP address.
+ FilterAtDestination *PathRequestFilter `type:"structure"`
+
+ // Scopes the analysis to network paths that match specific filters at the source.
+ // If you specify this parameter, you can't specify the parameters for the source
+ // IP address or the destination port.
+ FilterAtSource *PathRequestFilter `type:"structure"`
+
// The protocol.
//
// Protocol is a required field
Protocol *string `type:"string" required:"true" enum:"Protocol"`
- // The Amazon Web Services resource that is the source of the path.
+ // The ID or ARN of the source. If the resource is in another account, you must
+ // specify an ARN.
//
// Source is a required field
Source *string `type:"string" required:"true"`
- // The IP address of the Amazon Web Services resource that is the source of
- // the path.
+ // The IP address of the source.
SourceIp *string `type:"string"`
// The tags to add to the path.
@@ -68115,9 +72432,6 @@ func (s CreateNetworkInsightsPathInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateNetworkInsightsPathInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInsightsPathInput"}
- if s.Destination == nil {
- invalidParams.Add(request.NewErrParamRequired("Destination"))
- }
if s.Protocol == nil {
invalidParams.Add(request.NewErrParamRequired("Protocol"))
}
@@ -68161,6 +72475,18 @@ func (s *CreateNetworkInsightsPathInput) SetDryRun(v bool) *CreateNetworkInsight
return s
}
+// SetFilterAtDestination sets the FilterAtDestination field's value.
+func (s *CreateNetworkInsightsPathInput) SetFilterAtDestination(v *PathRequestFilter) *CreateNetworkInsightsPathInput {
+ s.FilterAtDestination = v
+ return s
+}
+
+// SetFilterAtSource sets the FilterAtSource field's value.
+func (s *CreateNetworkInsightsPathInput) SetFilterAtSource(v *PathRequestFilter) *CreateNetworkInsightsPathInput {
+ s.FilterAtSource = v
+ return s
+}
+
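Destination is no longer a required parameter; a path can instead be scoped with the new filter structures. A sketch with placeholder values (the fields inside PathRequestFilter are not shown in this hunk, so the filter is left empty here):

    path := new(ec2.CreateNetworkInsightsPathInput).
        SetSource("igw-0example"). // ID or ARN of the source
        SetProtocol("tcp").
        SetFilterAtDestination(&ec2.PathRequestFilter{}) // would carry destination-side filters
    // Validate no longer insists on Destination when filters scope the path.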
// SetProtocol sets the Protocol field's value.
func (s *CreateNetworkInsightsPathInput) SetProtocol(v string) *CreateNetworkInsightsPathInput {
s.Protocol = &v
@@ -69537,7 +73863,7 @@ func (s *CreateRouteTableOutput) SetRouteTable(v *RouteTable) *CreateRouteTableO
type CreateSecurityGroupInput struct {
_ struct{} `type:"structure"`
- // A description for the security group. This is informational only.
+ // A description for the security group.
//
// Constraints: Up to 255 characters in length
//
@@ -69844,6 +74170,11 @@ func (s *CreateSnapshotsInput) Validate() error {
if s.InstanceSpecification == nil {
invalidParams.Add(request.NewErrParamRequired("InstanceSpecification"))
}
+ if s.InstanceSpecification != nil {
+ if err := s.InstanceSpecification.Validate(); err != nil {
+ invalidParams.AddNested("InstanceSpecification", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -72701,6 +77032,891 @@ func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v strin
return s
}
+// Options for a network interface-type endpoint.
+type CreateVerifiedAccessEndpointEniOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `type:"string"`
+
+ // The IP port number.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `type:"string" enum:"VerifiedAccessEndpointProtocol"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointEniOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointEniOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVerifiedAccessEndpointEniOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVerifiedAccessEndpointEniOptions"}
+ if s.Port != nil && *s.Port < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
+func (s *CreateVerifiedAccessEndpointEniOptions) SetNetworkInterfaceId(v string) *CreateVerifiedAccessEndpointEniOptions {
+ s.NetworkInterfaceId = &v
+ return s
+}
+
+// SetPort sets the Port field's value.
+func (s *CreateVerifiedAccessEndpointEniOptions) SetPort(v int64) *CreateVerifiedAccessEndpointEniOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *CreateVerifiedAccessEndpointEniOptions) SetProtocol(v string) *CreateVerifiedAccessEndpointEniOptions {
+ s.Protocol = &v
+ return s
+}
+
+type CreateVerifiedAccessEndpointInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DNS name for users to reach your application.
+ //
+ // ApplicationDomain is a required field
+ ApplicationDomain *string `type:"string" required:"true"`
+
+ // The Amazon Web Services network component Verified Access attaches to.
+ //
+ // AttachmentType is a required field
+ AttachmentType *string `type:"string" required:"true" enum:"VerifiedAccessEndpointAttachmentType"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access endpoint.
+ Description *string `type:"string"`
+
+ // The ARN of the public TLS/SSL certificate in Amazon Web Services Certificate
+ // Manager to associate with the endpoint. The CN in the certificate must match
+ // the DNS name your end users will use to reach your application.
+ //
+ // DomainCertificateArn is a required field
+ DomainCertificateArn *string `type:"string" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // A custom identifier that gets prepended to a DNS name that is generated for
+ // the endpoint.
+ //
+ // EndpointDomainPrefix is a required field
+ EndpointDomainPrefix *string `type:"string" required:"true"`
+
+ // The type of Amazon Web Services Verified Access endpoint to create.
+ //
+ // EndpointType is a required field
+ EndpointType *string `type:"string" required:"true" enum:"VerifiedAccessEndpointType"`
+
+ // The load balancer details if creating the Amazon Web Services Verified Access
+ // endpoint as load-balancer type.
+ LoadBalancerOptions *CreateVerifiedAccessEndpointLoadBalancerOptions `type:"structure"`
+
+ // The network interface details if creating the Amazon Web Services Verified
+ // Access endpoint as network-interface type.
+ NetworkInterfaceOptions *CreateVerifiedAccessEndpointEniOptions `type:"structure"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `type:"string"`
+
+ // The Amazon EC2 security groups to associate with the Amazon Web Services
+ // Verified Access endpoint.
+ SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`
+
+ // The tags to assign to the Amazon Web Services Verified Access endpoint.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
+ // The ID of the Verified Access group to associate the endpoint with.
+ //
+ // VerifiedAccessGroupId is a required field
+ VerifiedAccessGroupId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVerifiedAccessEndpointInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVerifiedAccessEndpointInput"}
+ if s.ApplicationDomain == nil {
+ invalidParams.Add(request.NewErrParamRequired("ApplicationDomain"))
+ }
+ if s.AttachmentType == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttachmentType"))
+ }
+ if s.DomainCertificateArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("DomainCertificateArn"))
+ }
+ if s.EndpointDomainPrefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("EndpointDomainPrefix"))
+ }
+ if s.EndpointType == nil {
+ invalidParams.Add(request.NewErrParamRequired("EndpointType"))
+ }
+ if s.VerifiedAccessGroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessGroupId"))
+ }
+ if s.LoadBalancerOptions != nil {
+ if err := s.LoadBalancerOptions.Validate(); err != nil {
+ invalidParams.AddNested("LoadBalancerOptions", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.NetworkInterfaceOptions != nil {
+ if err := s.NetworkInterfaceOptions.Validate(); err != nil {
+ invalidParams.AddNested("NetworkInterfaceOptions", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetApplicationDomain sets the ApplicationDomain field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetApplicationDomain(v string) *CreateVerifiedAccessEndpointInput {
+ s.ApplicationDomain = &v
+ return s
+}
+
+// SetAttachmentType sets the AttachmentType field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetAttachmentType(v string) *CreateVerifiedAccessEndpointInput {
+ s.AttachmentType = &v
+ return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetClientToken(v string) *CreateVerifiedAccessEndpointInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetDescription(v string) *CreateVerifiedAccessEndpointInput {
+ s.Description = &v
+ return s
+}
+
+// SetDomainCertificateArn sets the DomainCertificateArn field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetDomainCertificateArn(v string) *CreateVerifiedAccessEndpointInput {
+ s.DomainCertificateArn = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetDryRun(v bool) *CreateVerifiedAccessEndpointInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetEndpointDomainPrefix sets the EndpointDomainPrefix field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetEndpointDomainPrefix(v string) *CreateVerifiedAccessEndpointInput {
+ s.EndpointDomainPrefix = &v
+ return s
+}
+
+// SetEndpointType sets the EndpointType field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetEndpointType(v string) *CreateVerifiedAccessEndpointInput {
+ s.EndpointType = &v
+ return s
+}
+
+// SetLoadBalancerOptions sets the LoadBalancerOptions field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetLoadBalancerOptions(v *CreateVerifiedAccessEndpointLoadBalancerOptions) *CreateVerifiedAccessEndpointInput {
+ s.LoadBalancerOptions = v
+ return s
+}
+
+// SetNetworkInterfaceOptions sets the NetworkInterfaceOptions field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetNetworkInterfaceOptions(v *CreateVerifiedAccessEndpointEniOptions) *CreateVerifiedAccessEndpointInput {
+ s.NetworkInterfaceOptions = v
+ return s
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetPolicyDocument(v string) *CreateVerifiedAccessEndpointInput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetSecurityGroupIds sets the SecurityGroupIds field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetSecurityGroupIds(v []*string) *CreateVerifiedAccessEndpointInput {
+ s.SecurityGroupIds = v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateVerifiedAccessEndpointInput {
+ s.TagSpecifications = v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *CreateVerifiedAccessEndpointInput) SetVerifiedAccessGroupId(v string) *CreateVerifiedAccessEndpointInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// Describes a load balancer when creating an Amazon Web Services Verified Access
+// endpoint using the load-balancer type.
+type CreateVerifiedAccessEndpointLoadBalancerOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the load balancer.
+ LoadBalancerArn *string `type:"string"`
+
+ // The IP port number.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `type:"string" enum:"VerifiedAccessEndpointProtocol"`
+
+ // The IDs of the subnets.
+ SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointLoadBalancerOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointLoadBalancerOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVerifiedAccessEndpointLoadBalancerOptions"}
+ if s.Port != nil && *s.Port < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoadBalancerArn sets the LoadBalancerArn field's value.
+func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) SetLoadBalancerArn(v string) *CreateVerifiedAccessEndpointLoadBalancerOptions {
+ s.LoadBalancerArn = &v
+ return s
+}
+
+// SetPort sets the Port field's value.
+func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) SetPort(v int64) *CreateVerifiedAccessEndpointLoadBalancerOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) SetProtocol(v string) *CreateVerifiedAccessEndpointLoadBalancerOptions {
+ s.Protocol = &v
+ return s
+}
+
+// SetSubnetIds sets the SubnetIds field's value.
+func (s *CreateVerifiedAccessEndpointLoadBalancerOptions) SetSubnetIds(v []*string) *CreateVerifiedAccessEndpointLoadBalancerOptions {
+ s.SubnetIds = v
+ return s
+}
+
+type CreateVerifiedAccessEndpointOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessEndpointOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessEndpoint sets the VerifiedAccessEndpoint field's value.
+func (s *CreateVerifiedAccessEndpointOutput) SetVerifiedAccessEndpoint(v *VerifiedAccessEndpoint) *CreateVerifiedAccessEndpointOutput {
+ s.VerifiedAccessEndpoint = v
+ return s
+}
+
+type CreateVerifiedAccessGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access group.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `type:"string"`
+
+ // The tags to assign to the Amazon Web Services Verified Access group.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVerifiedAccessGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVerifiedAccessGroupInput"}
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVerifiedAccessGroupInput) SetClientToken(v string) *CreateVerifiedAccessGroupInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateVerifiedAccessGroupInput) SetDescription(v string) *CreateVerifiedAccessGroupInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVerifiedAccessGroupInput) SetDryRun(v bool) *CreateVerifiedAccessGroupInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *CreateVerifiedAccessGroupInput) SetPolicyDocument(v string) *CreateVerifiedAccessGroupInput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateVerifiedAccessGroupInput) SetTagSpecifications(v []*TagSpecification) *CreateVerifiedAccessGroupInput {
+ s.TagSpecifications = v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *CreateVerifiedAccessGroupInput) SetVerifiedAccessInstanceId(v string) *CreateVerifiedAccessGroupInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type CreateVerifiedAccessGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Verified Access group.
+ VerifiedAccessGroup *VerifiedAccessGroup `locationName:"verifiedAccessGroup" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessGroupOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessGroup sets the VerifiedAccessGroup field's value.
+func (s *CreateVerifiedAccessGroupOutput) SetVerifiedAccessGroup(v *VerifiedAccessGroup) *CreateVerifiedAccessGroupOutput {
+ s.VerifiedAccessGroup = v
+ return s
+}
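+
+// Editor's sketch (not part of the generated SDK): creating a group under an
+// existing Verified Access instance via the generated setters. The instance ID
+// is a placeholder, and svc is assumed to be an *ec2.EC2 client whose
+// CreateVerifiedAccessGroup operation is added elsewhere in this change.
+//
+//	input := &ec2.CreateVerifiedAccessGroupInput{}
+//	input.SetVerifiedAccessInstanceId("vai-0123456789abcdef0")
+//	input.SetDescription("example group")
+//	out, err := svc.CreateVerifiedAccessGroup(input)
+//	if err == nil {
+//		_ = out.VerifiedAccessGroup // details of the created group
+//	}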
+
+type CreateVerifiedAccessInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access instance.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The tags to assign to the Amazon Web Services Verified Access instance.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessInstanceInput) GoString() string {
+ return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVerifiedAccessInstanceInput) SetClientToken(v string) *CreateVerifiedAccessInstanceInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateVerifiedAccessInstanceInput) SetDescription(v string) *CreateVerifiedAccessInstanceInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVerifiedAccessInstanceInput) SetDryRun(v bool) *CreateVerifiedAccessInstanceInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateVerifiedAccessInstanceInput) SetTagSpecifications(v []*TagSpecification) *CreateVerifiedAccessInstanceInput {
+ s.TagSpecifications = v
+ return s
+}
+
+type CreateVerifiedAccessInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access instance.
+ VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessInstance sets the VerifiedAccessInstance field's value.
+func (s *CreateVerifiedAccessInstanceOutput) SetVerifiedAccessInstance(v *VerifiedAccessInstance) *CreateVerifiedAccessInstanceOutput {
+ s.VerifiedAccessInstance = v
+ return s
+}
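+
+// Editor's sketch (not part of the generated SDK): ClientToken carries the
+// idempotencyToken tag, so a caller that retries can pass the same token and
+// repeated requests create at most one instance. The token is a placeholder and
+// svc is assumed to be an *ec2.EC2 client.
+//
+//	input := &ec2.CreateVerifiedAccessInstanceInput{}
+//	input.SetClientToken("7f1c2a9e-example-client-token")
+//	input.SetDescription("example instance")
+//	out, err := svc.CreateVerifiedAccessInstance(input)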
+
+// Options for a device-identity type trust provider.
+type CreateVerifiedAccessTrustProviderDeviceOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the tenant application with the device-identity provider.
+ TenantId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderDeviceOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderDeviceOptions) GoString() string {
+ return s.String()
+}
+
+// SetTenantId sets the TenantId field's value.
+func (s *CreateVerifiedAccessTrustProviderDeviceOptions) SetTenantId(v string) *CreateVerifiedAccessTrustProviderDeviceOptions {
+ s.TenantId = &v
+ return s
+}
+
+type CreateVerifiedAccessTrustProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access trust provider.
+ Description *string `type:"string"`
+
+ // The options for device identity based trust providers.
+ DeviceOptions *CreateVerifiedAccessTrustProviderDeviceOptions `type:"structure"`
+
+ // The type of device-based trust provider.
+ DeviceTrustProviderType *string `type:"string" enum:"DeviceTrustProviderType"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The OpenID Connect details for an oidc-type, user-identity based trust provider.
+ OidcOptions *CreateVerifiedAccessTrustProviderOidcOptions `type:"structure"`
+
+ // The identifier to be used when working with policy rules.
+ //
+ // PolicyReferenceName is a required field
+ PolicyReferenceName *string `type:"string" required:"true"`
+
+ // The tags to assign to the Amazon Web Services Verified Access trust provider.
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
+
+ // The type of trust provider can be either user or device-based.
+ //
+ // TrustProviderType is a required field
+ TrustProviderType *string `type:"string" required:"true" enum:"TrustProviderType"`
+
+ // The type of user-based trust provider.
+ UserTrustProviderType *string `type:"string" enum:"UserTrustProviderType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVerifiedAccessTrustProviderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVerifiedAccessTrustProviderInput"}
+ if s.PolicyReferenceName == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyReferenceName"))
+ }
+ if s.TrustProviderType == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrustProviderType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetClientToken(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetDescription(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.Description = &v
+ return s
+}
+
+// SetDeviceOptions sets the DeviceOptions field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetDeviceOptions(v *CreateVerifiedAccessTrustProviderDeviceOptions) *CreateVerifiedAccessTrustProviderInput {
+ s.DeviceOptions = v
+ return s
+}
+
+// SetDeviceTrustProviderType sets the DeviceTrustProviderType field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetDeviceTrustProviderType(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.DeviceTrustProviderType = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetDryRun(v bool) *CreateVerifiedAccessTrustProviderInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetOidcOptions sets the OidcOptions field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetOidcOptions(v *CreateVerifiedAccessTrustProviderOidcOptions) *CreateVerifiedAccessTrustProviderInput {
+ s.OidcOptions = v
+ return s
+}
+
+// SetPolicyReferenceName sets the PolicyReferenceName field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetPolicyReferenceName(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.PolicyReferenceName = &v
+ return s
+}
+
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetTagSpecifications(v []*TagSpecification) *CreateVerifiedAccessTrustProviderInput {
+ s.TagSpecifications = v
+ return s
+}
+
+// SetTrustProviderType sets the TrustProviderType field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetTrustProviderType(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.TrustProviderType = &v
+ return s
+}
+
+// SetUserTrustProviderType sets the UserTrustProviderType field's value.
+func (s *CreateVerifiedAccessTrustProviderInput) SetUserTrustProviderType(v string) *CreateVerifiedAccessTrustProviderInput {
+ s.UserTrustProviderType = &v
+ return s
+}
+
+// Options for an OIDC-based, user-identity type trust provider.
+type CreateVerifiedAccessTrustProviderOidcOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The OIDC authorization endpoint.
+ AuthorizationEndpoint *string `type:"string"`
+
+ // The client identifier.
+ ClientId *string `type:"string"`
+
+ // The client secret.
+ ClientSecret *string `type:"string"`
+
+ // The OIDC issuer.
+ Issuer *string `type:"string"`
+
+ // OpenID Connect (OIDC) scopes are used by an application during authentication
+ // to authorize access to a user's details. Each scope returns a specific set
+ // of user attributes.
+ Scope *string `type:"string"`
+
+ // The OIDC token endpoint.
+ TokenEndpoint *string `type:"string"`
+
+ // The OIDC user info endpoint.
+ UserInfoEndpoint *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderOidcOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderOidcOptions) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetAuthorizationEndpoint(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.AuthorizationEndpoint = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetClientId(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.ClientId = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetClientSecret(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetIssuer(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.Issuer = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetScope(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.Scope = &v
+ return s
+}
+
+// SetTokenEndpoint sets the TokenEndpoint field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetTokenEndpoint(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.TokenEndpoint = &v
+ return s
+}
+
+// SetUserInfoEndpoint sets the UserInfoEndpoint field's value.
+func (s *CreateVerifiedAccessTrustProviderOidcOptions) SetUserInfoEndpoint(v string) *CreateVerifiedAccessTrustProviderOidcOptions {
+ s.UserInfoEndpoint = &v
+ return s
+}
+
+type CreateVerifiedAccessTrustProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateVerifiedAccessTrustProviderOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessTrustProvider sets the VerifiedAccessTrustProvider field's value.
+func (s *CreateVerifiedAccessTrustProviderOutput) SetVerifiedAccessTrustProvider(v *VerifiedAccessTrustProvider) *CreateVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessTrustProvider = v
+ return s
+}
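+
+// Editor's sketch (not part of the generated SDK): wiring the OIDC options into
+// a user-type trust provider request. The issuer, client credentials, and policy
+// reference name are placeholders, and svc is assumed to be an *ec2.EC2 client.
+//
+//	oidc := &ec2.CreateVerifiedAccessTrustProviderOidcOptions{}
+//	oidc.SetIssuer("https://idp.example.com")
+//	oidc.SetClientId("example-client-id")
+//	oidc.SetClientSecret("example-client-secret")
+//	oidc.SetScope("openid")
+//
+//	input := &ec2.CreateVerifiedAccessTrustProviderInput{}
+//	input.SetTrustProviderType("user")
+//	input.SetUserTrustProviderType("oidc")
+//	input.SetPolicyReferenceName("idp")
+//	input.SetOidcOptions(oidc)
+//	out, err := svc.CreateVerifiedAccessTrustProvider(input)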
+
type CreateVolumeInput struct {
_ struct{} `type:"structure"`
@@ -72823,6 +78039,9 @@ type CreateVolumeInput struct {
//
// * Magnetic: standard
//
+ // Throughput Optimized HDD (st1) and Cold HDD (sc1) volumes can't be used as
+ // boot volumes.
+ //
// For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
@@ -73029,8 +78248,8 @@ type CreateVpcEndpointConnectionNotificationInput struct {
// of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string `type:"string"`
- // One or more endpoint events for which to receive notifications. Valid values
- // are Accept, Connect, Delete, and Reject.
+ // The endpoint events for which to receive notifications. Valid values are
+ // Accept, Connect, Delete, and Reject.
//
// ConnectionEvents is a required field
ConnectionEvents []*string `locationNameList:"item" type:"list" required:"true"`
@@ -73164,7 +78383,6 @@ func (s *CreateVpcEndpointConnectionNotificationOutput) SetConnectionNotificatio
return s
}
-// Contains the parameters for CreateVpcEndpoint.
type CreateVpcEndpointInput struct {
_ struct{} `type:"structure"`
@@ -73205,22 +78423,22 @@ type CreateVpcEndpointInput struct {
// Default: true
PrivateDnsEnabled *bool `type:"boolean"`
- // (Gateway endpoint) One or more route table IDs.
+ // (Gateway endpoint) The route table IDs.
RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"`
- // (Interface endpoint) The ID of one or more security groups to associate with
- // the endpoint network interface.
+ // (Interface endpoint) The IDs of the security groups to associate with the
+ // endpoint network interface. If this parameter is not specified, we use the
+ // default security group for the VPC.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`
- // The service name. To get a list of available services, use the DescribeVpcEndpointServices
- // request, or get the name from the service provider.
+ // The service name.
//
// ServiceName is a required field
ServiceName *string `type:"string" required:"true"`
- // (Interface and Gateway Load Balancer endpoints) The ID of one or more subnets
- // in which to create an endpoint network interface. For a Gateway Load Balancer
- // endpoint, you can specify one subnet only.
+ // (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in
+ // which to create an endpoint network interface. For a Gateway Load Balancer
+ // endpoint, you can specify only one subnet.
SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"`
// The tags to associate with the endpoint.
@@ -73231,7 +78449,7 @@ type CreateVpcEndpointInput struct {
// Default: Gateway
VpcEndpointType *string `type:"string" enum:"VpcEndpointType"`
- // The ID of the VPC in which the endpoint will be used.
+ // The ID of the VPC for the endpoint.
//
// VpcId is a required field
VpcId *string `type:"string" required:"true"`
@@ -73349,7 +78567,6 @@ func (s *CreateVpcEndpointInput) SetVpcId(v string) *CreateVpcEndpointInput {
return s
}
-// Contains the output of CreateVpcEndpoint.
type CreateVpcEndpointOutput struct {
_ struct{} `type:"structure"`
@@ -73408,11 +78625,10 @@ type CreateVpcEndpointServiceConfigurationInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers.
+ // The Amazon Resource Names (ARNs) of the Gateway Load Balancers.
GatewayLoadBalancerArns []*string `locationName:"GatewayLoadBalancerArn" locationNameList:"item" type:"list"`
- // The Amazon Resource Names (ARNs) of one or more Network Load Balancers for
- // your service.
+ // The Amazon Resource Names (ARNs) of the Network Load Balancers.
NetworkLoadBalancerArns []*string `locationName:"NetworkLoadBalancerArn" locationNameList:"item" type:"list"`
// (Interface endpoint configuration) The private DNS name to assign to the
@@ -73758,7 +78974,9 @@ type CreateVpcPeeringConnectionInput struct {
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The ID of the requester VPC. You must specify this parameter in the request.
- VpcId *string `locationName:"vpcId" type:"string"`
+ //
+ // VpcId is a required field
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"`
}
// String returns the string representation.
@@ -73779,6 +78997,19 @@ func (s CreateVpcPeeringConnectionInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateVpcPeeringConnectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateVpcPeeringConnectionInput"}
+ if s.VpcId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpcId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetDryRun sets the DryRun field's value.
func (s *CreateVpcPeeringConnectionInput) SetDryRun(v bool) *CreateVpcPeeringConnectionInput {
s.DryRun = &v
@@ -74387,8 +79618,8 @@ type DataQuery struct {
// query, the dataResponse identifies the query as MyQuery01.
Id *string `type:"string"`
- // The aggregation metric used for the data query. Currently only aggregation-latency
- // is supported, indicating network latency.
+ // The metric, aggregation-latency, indicates that network latency is aggregated
+ // for the query. This is the only supported metric.
Metric *string `type:"string" enum:"MetricType"`
// The aggregation period used for the data query.
@@ -74398,12 +79629,10 @@ type DataQuery struct {
// example, us-east-1.
Source *string `type:"string"`
- // Metric data aggregations over specified periods of time. The following are
- // the supported Infrastructure Performance statistics:
- //
- // * p50 - The median value of the metric aggregated over a specified start
- // and end time. For example, a metric of five_minutes is the median of all
- // the data points gathered within those five minutes.
+ // The metric data aggregation period, p50, between the specified startDate
+ // and endDate. For example, a metric of five_minutes is the median of all the
+ // data points gathered within those five minutes. p50 is the only supported
+ // statistic.
Statistic *string `type:"string" enum:"StatisticType"`
}
@@ -74472,8 +79701,8 @@ type DataResponse struct {
// The ID passed in the DataQuery.
Id *string `locationName:"id" type:"string"`
- // The metric used for the network performance request. Currently only aggregate-latency
- // is supported, showing network latency during a specified period.
+ // The metric used for the network performance request. Only aggregate-latency
+ // is supported, which shows network latency during a specified period.
Metric *string `locationName:"metric" type:"string" enum:"MetricType"`
// A list of MetricPoint objects.
@@ -75428,11 +80657,11 @@ type DeleteFleetsInput struct {
// FleetIds is a required field
FleetIds []*string `locationName:"FleetId" type:"list" required:"true"`
- // Indicates whether to terminate the instances when the EC2 Fleet is deleted.
- // The default is to terminate the instances.
+ // Indicates whether to terminate the associated instances when the EC2 Fleet
+ // is deleted. The default is to terminate the instances.
//
// To let the instances continue to run after the EC2 Fleet is deleted, specify
- // NoTerminateInstances. Supported only for fleets of type maintain and request.
+ // no-terminate-instances. Supported only for fleets of type maintain and request.
//
// For instant fleets, you cannot specify NoTerminateInstances. A deleted instant
// fleet with running instances is not supported.
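+
+// Editor's sketch (not part of the generated SDK): deleting a fleet while
+// leaving its instances running, per the comment above. The fleet ID is a
+// placeholder, the TerminateInstances field belongs to this input type but is
+// not shown in this hunk, and svc is assumed to be an *ec2.EC2 client.
+//
+//	input := &ec2.DeleteFleetsInput{}
+//	input.SetFleetIds([]*string{aws.String("fleet-12345678-1234-1234-1234-123456789012")})
+//	input.SetTerminateInstances(false) // keep the instances running
+//	out, err := svc.DeleteFleets(input)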
@@ -76095,6 +81324,95 @@ func (s *DeleteIpamPoolOutput) SetIpamPool(v *IpamPool) *DeleteIpamPoolOutput {
return s
}
+type DeleteIpamResourceDiscoveryInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The IPAM resource discovery ID.
+ //
+ // IpamResourceDiscoveryId is a required field
+ IpamResourceDiscoveryId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteIpamResourceDiscoveryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteIpamResourceDiscoveryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteIpamResourceDiscoveryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteIpamResourceDiscoveryInput"}
+ if s.IpamResourceDiscoveryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteIpamResourceDiscoveryInput) SetDryRun(v bool) *DeleteIpamResourceDiscoveryInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *DeleteIpamResourceDiscoveryInput) SetIpamResourceDiscoveryId(v string) *DeleteIpamResourceDiscoveryInput {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+type DeleteIpamResourceDiscoveryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The IPAM resource discovery.
+ IpamResourceDiscovery *IpamResourceDiscovery `locationName:"ipamResourceDiscovery" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteIpamResourceDiscoveryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteIpamResourceDiscoveryOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscovery sets the IpamResourceDiscovery field's value.
+func (s *DeleteIpamResourceDiscoveryOutput) SetIpamResourceDiscovery(v *IpamResourceDiscovery) *DeleteIpamResourceDiscoveryOutput {
+ s.IpamResourceDiscovery = v
+ return s
+}
+
type DeleteIpamScopeInput struct {
_ struct{} `type:"structure"`
@@ -76598,9 +81916,11 @@ type DeleteLocalGatewayRouteInput struct {
_ struct{} `type:"structure"`
// The CIDR range for the route. This must match the CIDR for the route exactly.
- //
- // DestinationCidrBlock is a required field
- DestinationCidrBlock *string `type:"string" required:"true"`
+ DestinationCidrBlock *string `type:"string"`
+
+ // Use a prefix list in place of DestinationCidrBlock. You cannot use DestinationPrefixListId
+ // and DestinationCidrBlock in the same request.
+ DestinationPrefixListId *string `type:"string"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -76635,9 +81955,6 @@ func (s DeleteLocalGatewayRouteInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteLocalGatewayRouteInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteLocalGatewayRouteInput"}
- if s.DestinationCidrBlock == nil {
- invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
- }
if s.LocalGatewayRouteTableId == nil {
invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId"))
}
@@ -76654,6 +81971,12 @@ func (s *DeleteLocalGatewayRouteInput) SetDestinationCidrBlock(v string) *Delete
return s
}
+// SetDestinationPrefixListId sets the DestinationPrefixListId field's value.
+func (s *DeleteLocalGatewayRouteInput) SetDestinationPrefixListId(v string) *DeleteLocalGatewayRouteInput {
+ s.DestinationPrefixListId = &v
+ return s
+}
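+
+// Editor's sketch (not part of the generated SDK): with this change a route is
+// identified by exactly one of DestinationCidrBlock or DestinationPrefixListId.
+// The IDs are placeholders and svc is assumed to be an *ec2.EC2 client.
+//
+//	input := &ec2.DeleteLocalGatewayRouteInput{}
+//	input.SetLocalGatewayRouteTableId("lgw-rtb-0123456789abcdef0")
+//	input.SetDestinationPrefixListId("pl-0123456789abcdef0") // or SetDestinationCidrBlock, not both
+//	out, err := svc.DeleteLocalGatewayRoute(input)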
+
// SetDryRun sets the DryRun field's value.
func (s *DeleteLocalGatewayRouteInput) SetDryRun(v bool) *DeleteLocalGatewayRouteInput {
s.DryRun = &v
@@ -80216,6 +85539,406 @@ func (s *DeleteTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment
return s
}
+type DeleteVerifiedAccessEndpointInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ //
+ // VerifiedAccessEndpointId is a required field
+ VerifiedAccessEndpointId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessEndpointInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessEndpointInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVerifiedAccessEndpointInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedAccessEndpointInput"}
+ if s.VerifiedAccessEndpointId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessEndpointId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DeleteVerifiedAccessEndpointInput) SetClientToken(v string) *DeleteVerifiedAccessEndpointInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteVerifiedAccessEndpointInput) SetDryRun(v bool) *DeleteVerifiedAccessEndpointInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessEndpointId sets the VerifiedAccessEndpointId field's value.
+func (s *DeleteVerifiedAccessEndpointInput) SetVerifiedAccessEndpointId(v string) *DeleteVerifiedAccessEndpointInput {
+ s.VerifiedAccessEndpointId = &v
+ return s
+}
+
+type DeleteVerifiedAccessEndpointOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access endpoint.
+ VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessEndpointOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessEndpointOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessEndpoint sets the VerifiedAccessEndpoint field's value.
+func (s *DeleteVerifiedAccessEndpointOutput) SetVerifiedAccessEndpoint(v *VerifiedAccessEndpoint) *DeleteVerifiedAccessEndpointOutput {
+ s.VerifiedAccessEndpoint = v
+ return s
+}
+
+type DeleteVerifiedAccessGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ //
+ // VerifiedAccessGroupId is a required field
+ VerifiedAccessGroupId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVerifiedAccessGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedAccessGroupInput"}
+ if s.VerifiedAccessGroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessGroupId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DeleteVerifiedAccessGroupInput) SetClientToken(v string) *DeleteVerifiedAccessGroupInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteVerifiedAccessGroupInput) SetDryRun(v bool) *DeleteVerifiedAccessGroupInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *DeleteVerifiedAccessGroupInput) SetVerifiedAccessGroupId(v string) *DeleteVerifiedAccessGroupInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+type DeleteVerifiedAccessGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access group.
+ VerifiedAccessGroup *VerifiedAccessGroup `locationName:"verifiedAccessGroup" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessGroupOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessGroup sets the VerifiedAccessGroup field's value.
+func (s *DeleteVerifiedAccessGroupOutput) SetVerifiedAccessGroup(v *VerifiedAccessGroup) *DeleteVerifiedAccessGroupOutput {
+ s.VerifiedAccessGroup = v
+ return s
+}
+
+type DeleteVerifiedAccessInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVerifiedAccessInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedAccessInstanceInput"}
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DeleteVerifiedAccessInstanceInput) SetClientToken(v string) *DeleteVerifiedAccessInstanceInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteVerifiedAccessInstanceInput) SetDryRun(v bool) *DeleteVerifiedAccessInstanceInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *DeleteVerifiedAccessInstanceInput) SetVerifiedAccessInstanceId(v string) *DeleteVerifiedAccessInstanceInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type DeleteVerifiedAccessInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access instance.
+ VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessInstance sets the VerifiedAccessInstance field's value.
+func (s *DeleteVerifiedAccessInstanceOutput) SetVerifiedAccessInstance(v *VerifiedAccessInstance) *DeleteVerifiedAccessInstanceOutput {
+ s.VerifiedAccessInstance = v
+ return s
+}
+
+type DeleteVerifiedAccessTrustProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ //
+ // VerifiedAccessTrustProviderId is a required field
+ VerifiedAccessTrustProviderId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessTrustProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessTrustProviderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVerifiedAccessTrustProviderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedAccessTrustProviderInput"}
+ if s.VerifiedAccessTrustProviderId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessTrustProviderId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DeleteVerifiedAccessTrustProviderInput) SetClientToken(v string) *DeleteVerifiedAccessTrustProviderInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteVerifiedAccessTrustProviderInput) SetDryRun(v bool) *DeleteVerifiedAccessTrustProviderInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *DeleteVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderId(v string) *DeleteVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
+type DeleteVerifiedAccessTrustProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessTrustProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteVerifiedAccessTrustProviderOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessTrustProvider sets the VerifiedAccessTrustProvider field's value.
+func (s *DeleteVerifiedAccessTrustProviderOutput) SetVerifiedAccessTrustProvider(v *VerifiedAccessTrustProvider) *DeleteVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessTrustProvider = v
+ return s
+}
+
type DeleteVolumeInput struct {
_ struct{} `type:"structure"`
@@ -80299,7 +86022,7 @@ func (s DeleteVolumeOutput) GoString() string {
type DeleteVpcEndpointConnectionNotificationsInput struct {
_ struct{} `type:"structure"`
- // One or more notification IDs.
+ // The IDs of the notifications.
//
// ConnectionNotificationIds is a required field
ConnectionNotificationIds []*string `locationName:"ConnectionNotificationId" locationNameList:"item" type:"list" required:"true"`
@@ -80394,7 +86117,7 @@ type DeleteVpcEndpointServiceConfigurationsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The IDs of one or more services.
+ // The IDs of the services.
//
// ServiceIds is a required field
ServiceIds []*string `locationName:"ServiceId" locationNameList:"item" type:"list" required:"true"`
@@ -80474,7 +86197,6 @@ func (s *DeleteVpcEndpointServiceConfigurationsOutput) SetUnsuccessful(v []*Unsu
return s
}
-// Contains the parameters for DeleteVpcEndpoints.
type DeleteVpcEndpointsInput struct {
_ struct{} `type:"structure"`
@@ -80484,7 +86206,7 @@ type DeleteVpcEndpointsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more VPC endpoint IDs.
+ // The IDs of the VPC endpoints.
//
// VpcEndpointIds is a required field
VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"`
@@ -80533,7 +86255,6 @@ func (s *DeleteVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DeleteVpcEndpo
return s
}
-// Contains the output of DeleteVpcEndpoints.
type DeleteVpcEndpointsOutput struct {
_ struct{} `type:"structure"`
@@ -81170,7 +86891,10 @@ func (s *DeprovisionIpamPoolCidrOutput) SetIpamPoolCidr(v *IpamPoolCidr) *Deprov
type DeprovisionPublicIpv4PoolCidrInput struct {
_ struct{} `type:"structure"`
- // The CIDR you want to deprovision from the pool.
+ // The CIDR you want to deprovision from the pool. Enter the CIDR you want to
+ // deprovision with a netmask of /32. You must rerun this command for each IP
+ // address in the CIDR range. If your CIDR is a /24, you will have to run this
+ // command to deprovision each of the 256 IP addresses in the /24 CIDR.
//
// Cidr is a required field
Cidr *string `type:"string" required:"true"`
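+
+// Editor's sketch (not part of the generated SDK): the comment above means a /24
+// must be deprovisioned one /32 at a time. The base address and pool ID are
+// placeholders, the pool ID field is not shown in this hunk, and svc is assumed
+// to be an *ec2.EC2 client.
+//
+//	for i := 0; i < 256; i++ {
+//		input := &ec2.DeprovisionPublicIpv4PoolCidrInput{}
+//		input.SetPoolId("ipv4pool-ec2-0123456789abcdef0") // assumed pool ID field, not shown here
+//		input.SetCidr(fmt.Sprintf("203.0.113.%d/32", i))
+//		if _, err := svc.DeprovisionPublicIpv4PoolCidr(input); err != nil {
+//			return err
+//		}
+//	}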
@@ -82207,7 +87931,7 @@ type DescribeAvailabilityZonesInput struct {
//
// * message - The Zone message.
//
- // * opt-in-status - The opt-in status (opted-in, and not-opted-in | opt-in-not-required).
+ // * opt-in-status - The opt-in status (opted-in | not-opted-in | opt-in-not-required).
//
// * parent-zoneID - The ID of the zone that handles some of the Local Zone
// and Wavelength Zone control plane operations, such as API calls.
@@ -82224,13 +87948,11 @@ type DescribeAvailabilityZonesInput struct {
// Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example,
// us-east-1-wl1-bos-wlz-1).
//
- // * zone-type - The type of zone, for example, local-zone.
- //
// * zone-name - The name of the Availability Zone (for example, us-east-1a),
// the Local Zone (for example, us-west-2-lax-1a), or the Wavelength Zone
// (for example, us-east-1-wl1-bos-wlz-1).
//
- // * zone-type - The type of zone, for example, local-zone.
+ // * zone-type - The type of zone (availability-zone | local-zone | wavelength-zone).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The IDs of the Availability Zones, Local Zones, and Wavelength Zones.
@@ -83144,13 +88866,15 @@ type DescribeClassicLinkInstancesInput struct {
// One or more instance IDs. Must be instances linked to a VPC through ClassicLink.
InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
//
// Constraint: If the value is greater than 1000, we return only 1000 items.
MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -83221,8 +88945,8 @@ type DescribeClassicLinkInstancesOutput struct {
// Information about one or more linked EC2-Classic instances.
Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -84296,11 +90020,13 @@ type DescribeDhcpOptionsInput struct {
// the tag value.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -84371,8 +90097,8 @@ type DescribeDhcpOptionsOutput struct {
// Information about one or more DHCP options sets.
DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
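+
+// Editor's sketch (not part of the generated SDK): the MaxResults/NextToken
+// wording above corresponds to the usual pagination loop; filters and cleanup
+// are trimmed, and svc is assumed to be an *ec2.EC2 client. The generated
+// DescribeDhcpOptionsPages helper covers the same loop.
+//
+//	var all []*ec2.DhcpOptions
+//	input := &ec2.DescribeDhcpOptionsInput{}
+//	input.SetMaxResults(100)
+//	for {
+//		page, err := svc.DescribeDhcpOptions(input)
+//		if err != nil {
+//			return err
+//		}
+//		all = append(all, page.DhcpOptions...)
+//		if page.NextToken == nil {
+//			break
+//		}
+//		input.SetNextToken(*page.NextToken)
+//	}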
@@ -84431,11 +90157,13 @@ type DescribeEgressOnlyInternetGatewaysInput struct {
// the tag value.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -84506,8 +90234,8 @@ type DescribeEgressOnlyInternetGatewaysOutput struct {
// Information about the egress-only internet gateways.
EgressOnlyInternetGateways []*EgressOnlyInternetGateway `locationName:"egressOnlyInternetGatewaySet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -84912,12 +90640,13 @@ type DescribeFastLaunchImagesInput struct {
// Details for one or more Windows AMI image IDs.
ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another request with the returned NextToken value.
- // If this parameter is not specified, then all results are returned.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -84976,8 +90705,8 @@ type DescribeFastLaunchImagesOutput struct {
// meet the requested criteria.
FastLaunchImages []*DescribeFastLaunchImagesSuccessItem `locationName:"fastLaunchImageSet" locationNameList:"item" type:"list"`
- // The token to use for the next set of results. This value is null when there
- // are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -85024,7 +90753,8 @@ type DescribeFastLaunchImagesSuccessItem struct {
// launches Windows instances from pre-provisioned snapshots.
LaunchTemplate *FastLaunchLaunchTemplateSpecificationResponse `locationName:"launchTemplate" type:"structure"`
- // The maximum number of parallel instances that are launched for creating resources.
+ // The maximum number of instances that Amazon EC2 can launch at the same time
+ // to create pre-provisioned snapshots for Windows faster launching.
MaxParallelLaunches *int64 `locationName:"maxParallelLaunches" type:"integer"`
// The owner ID for the fast-launch enabled Windows AMI.
@@ -85274,11 +91004,13 @@ type DescribeFastSnapshotRestoresInput struct {
// | optimizing | enabled | disabling | disabled).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -85330,8 +91062,8 @@ type DescribeFastSnapshotRestoresOutput struct {
// Information about the state of fast snapshot restores.
FastSnapshotRestores []*DescribeFastSnapshotRestoreSuccessItem `locationName:"fastSnapshotRestoreSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -85446,12 +91178,13 @@ type DescribeFleetHistoryInput struct {
// FleetId is a required field
FleetId *string `type:"string" required:"true"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
@@ -85542,10 +91275,11 @@ type DescribeFleetHistoryOutput struct {
// The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
// All records up to this time were retrieved.
//
- // If nextToken indicates that there are more results, this value is not present.
+ // If nextToken indicates that there are more items, this value is not present.
LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
@@ -85619,12 +91353,13 @@ type DescribeFleetInstancesInput struct {
// FleetId is a required field
FleetId *string `type:"string" required:"true"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -85699,7 +91434,8 @@ type DescribeFleetInstancesOutput struct {
// The ID of the EC2 Fleet.
FleetId *string `locationName:"fleetId" type:"string"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -85772,12 +91508,13 @@ type DescribeFleetsInput struct {
// does not appear in the response.
FleetIds []*string `locationName:"FleetId" type:"list"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -85906,7 +91643,8 @@ type DescribeFleetsOutput struct {
// Information about the EC2 Fleets.
Fleets []*FleetData `locationName:"fleetSet" locationNameList:"item" type:"list"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -85980,11 +91718,13 @@ type DescribeFlowLogsInput struct {
// Constraint: Maximum of 1000 flow log IDs.
FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next page of results.
+ // The token to request the next page of items. Pagination continues from the
+ // end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -86042,8 +91782,8 @@ type DescribeFlowLogsOutput struct {
// Information about the flow logs.
FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to request the next page of items. This value is null when there
+ // are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -86747,11 +92487,13 @@ type DescribeIamInstanceProfileAssociationsInput struct {
// * state - The state of the association (associating | associated | disassociating).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token to request the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -86816,8 +92558,8 @@ type DescribeIamInstanceProfileAssociationsOutput struct {
// Information about the IAM instance profile associations.
IamInstanceProfileAssociations []*IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociationSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -87110,7 +92852,7 @@ type DescribeImageAttributeOutput struct {
// by default, the instance requires that IMDSv2 is used when requesting instance
// metadata. In addition, HttpPutResponseHopLimit is set to 2. For more information,
// see Configure the AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html#configure-IMDS-new-instances-ami-configuration)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
ImdsSupport *AttributeValue `locationName:"imdsSupport" type:"structure"`
// The kernel ID.
@@ -87144,7 +92886,7 @@ type DescribeImageAttributeOutput struct {
// command. You can inspect and modify the UEFI data by using the python-uefivars
// tool (https://github.com/awslabs/python-uefivars) on GitHub. For more information,
// see UEFI Secure Boot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
UefiData *AttributeValue `locationName:"uefiData" type:"structure"`
}
@@ -87371,6 +93113,15 @@ type DescribeImagesInput struct {
// of what you specify for this parameter.
IncludeDeprecated *bool `type:"boolean"`
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ MaxResults *int64 `type:"integer"`
+
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
+ NextToken *string `type:"string"`
+
// Scopes the results to images with the specified owners. You can specify a
// combination of Amazon Web Services account IDs, self, amazon, and aws-marketplace.
// If you omit this parameter, the results include all images for which you
@@ -87426,6 +93177,18 @@ func (s *DescribeImagesInput) SetIncludeDeprecated(v bool) *DescribeImagesInput
return s
}
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeImagesInput) SetMaxResults(v int64) *DescribeImagesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeImagesInput) SetNextToken(v string) *DescribeImagesInput {
+ s.NextToken = &v
+ return s
+}
+
// SetOwners sets the Owners field's value.
func (s *DescribeImagesInput) SetOwners(v []*string) *DescribeImagesInput {
s.Owners = v
@@ -87437,6 +93200,10 @@ type DescribeImagesOutput struct {
// Information about the images.
Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"`
+
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation.
@@ -87463,6 +93230,12 @@ func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput {
return s
}
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput {
+ s.NextToken = &v
+ return s
+}
+
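Since this update adds MaxResults and NextToken to DescribeImages, callers can now page through images rather than receiving them all at once. A minimal sketch (not part of this diff), using the vendored import layout shown elsewhere in this PR; the "self" owner and the page size of 100 are illustrative assumptions:

package main

import (
	"fmt"
	"log"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Request pages of at most 100 images owned by the calling account.
	input := &ec2.DescribeImagesInput{
		Owners:     []*string{aws.String("self")},
		MaxResults: aws.Int64(100), // field added by this update
	}
	for {
		out, err := svc.DescribeImages(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, img := range out.Images {
			fmt.Println(aws.StringValue(img.ImageId))
		}
		if out.NextToken == nil { // nil token: no more pages
			break
		}
		input.NextToken = out.NextToken
	}
}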
type DescribeImportImageTasksInput struct {
_ struct{} `type:"structure"`
@@ -87970,13 +93743,16 @@ type DescribeInstanceCreditSpecificationsInput struct {
// Constraints: Maximum 1000 explicitly specified instance IDs.
InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value. This
- // value can be between 5 and 1000. You cannot specify this parameter and the
- // instance IDs parameter in the same call.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ //
+ // You cannot specify this parameter and the instance IDs parameter in the same
+ // call.
MaxResults *int64 `min:"5" type:"integer"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -88047,8 +93823,8 @@ type DescribeInstanceCreditSpecificationsOutput struct {
// Information about the credit option for CPU usage of an instance.
InstanceCreditSpecifications []*InstanceCreditSpecification `locationName:"instanceCreditSpecificationSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -88372,13 +94148,16 @@ type DescribeInstanceStatusInput struct {
// Constraints: Maximum 100 explicitly specified instance IDs.
InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value. This
- // value can be between 5 and 1000. You cannot specify this parameter and the
- // instance IDs parameter in the same call.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ //
+ // You cannot specify this parameter and the instance IDs parameter in the same
+ // request.
MaxResults *int64 `type:"integer"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -88442,8 +94221,8 @@ type DescribeInstanceStatusOutput struct {
// Information about the status of the instances.
InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -88498,12 +94277,13 @@ type DescribeInstanceTypeOfferingsInput struct {
// The location type.
LocationType *string `type:"string" enum:"LocationType"`
- // The maximum number of results to return for the request in a single page.
- // The remaining results can be seen by sending another request with the next
- // token value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -88574,8 +94354,8 @@ type DescribeInstanceTypeOfferingsOutput struct {
// The instance types offered.
InstanceTypeOfferings []*InstanceTypeOffering `locationName:"instanceTypeOfferingSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -88757,12 +94537,13 @@ type DescribeInstanceTypesInput struct {
// in the Amazon EC2 User Guide.
InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"`
- // The maximum number of results to return for the request in a single page.
- // The remaining results can be seen by sending another request with the next
- // token value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -88834,8 +94615,8 @@ type DescribeInstanceTypesOutput struct {
// in the Amazon EC2 User Guide.
InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -88967,11 +94748,14 @@ type DescribeInstancesInput struct {
// * metadata-options.http-tokens - The metadata request authorization state
// (optional | required)
//
- // * metadata-options.http-put-response-hop-limit - The http metadata request
+ // * metadata-options.http-put-response-hop-limit - The HTTP metadata request
// put response hop limit (integer, possible values 1 to 64)
//
- // * metadata-options.http-endpoint - Enable or disable metadata access on
- // http endpoint (enabled | disabled)
+ // * metadata-options.http-endpoint - The status of access to the HTTP metadata
+ // endpoint on your instance (enabled | disabled)
+ //
+ // * metadata-options.instance-metadata-tags - The status of access to instance
+ // tags from the instance metadata (enabled | disabled)
//
// * monitoring-state - Indicates whether detailed monitoring is enabled
// (disabled | enabled).
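The new metadata-options filters documented above are passed like any other Filter. A minimal sketch, assuming the vendored service/ec2 package path; only the filter name and value come from this diff, the rest is illustrative:

package main

import (
	"fmt"
	"log"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// List instances that expose their tags through the instance metadata service.
	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("metadata-options.instance-metadata-tags"),
			Values: []*string{aws.String("enabled")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range out.Reservations {
		for _, inst := range res.Instances {
			fmt.Println(aws.StringValue(inst.InstanceId))
		}
	}
}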
@@ -89141,13 +94925,16 @@ type DescribeInstancesInput struct {
// Default: Describes all your instances.
InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value. This
- // value can be between 5 and 1000. You cannot specify this parameter and the
- // instance IDs parameter in the same call.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ //
+ // You cannot specify this parameter and the instance IDs parameter in the same
+ // request.
MaxResults *int64 `locationName:"maxResults" type:"integer"`
- // The token to request the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -89202,8 +94989,8 @@ func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput
type DescribeInstancesOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the reservations.
@@ -89277,11 +95064,13 @@ type DescribeInternetGatewaysInput struct {
// Default: Describes all your internet gateways.
InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -89352,8 +95141,8 @@ type DescribeInternetGatewaysOutput struct {
// Information about one or more internet gateways.
InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -89512,6 +95301,257 @@ func (s *DescribeIpamPoolsOutput) SetNextToken(v string) *DescribeIpamPoolsOutpu
return s
}
+type DescribeIpamResourceDiscoveriesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A check for whether you have the required permissions for the action without
+ // actually making the request and provides an error response. If you have the
+ // required permissions, the error response is DryRunOperation. Otherwise, it
+ // is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The resource discovery filters.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The IPAM resource discovery IDs.
+ IpamResourceDiscoveryIds []*string `locationName:"IpamResourceDiscoveryId" locationNameList:"item" type:"list"`
+
+ // The maximum number of resource discoveries to return in one page of results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveriesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeIpamResourceDiscoveriesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeIpamResourceDiscoveriesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeIpamResourceDiscoveriesInput) SetDryRun(v bool) *DescribeIpamResourceDiscoveriesInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeIpamResourceDiscoveriesInput) SetFilters(v []*Filter) *DescribeIpamResourceDiscoveriesInput {
+ s.Filters = v
+ return s
+}
+
+// SetIpamResourceDiscoveryIds sets the IpamResourceDiscoveryIds field's value.
+func (s *DescribeIpamResourceDiscoveriesInput) SetIpamResourceDiscoveryIds(v []*string) *DescribeIpamResourceDiscoveriesInput {
+ s.IpamResourceDiscoveryIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeIpamResourceDiscoveriesInput) SetMaxResults(v int64) *DescribeIpamResourceDiscoveriesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeIpamResourceDiscoveriesInput) SetNextToken(v string) *DescribeIpamResourceDiscoveriesInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeIpamResourceDiscoveriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource discoveries.
+ IpamResourceDiscoveries []*IpamResourceDiscovery `locationName:"ipamResourceDiscoverySet" locationNameList:"item" type:"list"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveriesOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscoveries sets the IpamResourceDiscoveries field's value.
+func (s *DescribeIpamResourceDiscoveriesOutput) SetIpamResourceDiscoveries(v []*IpamResourceDiscovery) *DescribeIpamResourceDiscoveriesOutput {
+ s.IpamResourceDiscoveries = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeIpamResourceDiscoveriesOutput) SetNextToken(v string) *DescribeIpamResourceDiscoveriesOutput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeIpamResourceDiscoveryAssociationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A check for whether you have the required permissions for the action without
+ // actually making the request and provides an error response. If you have the
+ // required permissions, the error response is DryRunOperation. Otherwise, it
+ // is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The resource discovery association filters.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The resource discovery association IDs.
+ IpamResourceDiscoveryAssociationIds []*string `locationName:"IpamResourceDiscoveryAssociationId" locationNameList:"item" type:"list"`
+
+ // The maximum number of resource discovery associations to return in one page
+ // of results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveryAssociationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveryAssociationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeIpamResourceDiscoveryAssociationsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) SetDryRun(v bool) *DescribeIpamResourceDiscoveryAssociationsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) SetFilters(v []*Filter) *DescribeIpamResourceDiscoveryAssociationsInput {
+ s.Filters = v
+ return s
+}
+
+// SetIpamResourceDiscoveryAssociationIds sets the IpamResourceDiscoveryAssociationIds field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) SetIpamResourceDiscoveryAssociationIds(v []*string) *DescribeIpamResourceDiscoveryAssociationsInput {
+ s.IpamResourceDiscoveryAssociationIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) SetMaxResults(v int64) *DescribeIpamResourceDiscoveryAssociationsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsInput) SetNextToken(v string) *DescribeIpamResourceDiscoveryAssociationsInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeIpamResourceDiscoveryAssociationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The resource discovery associations.
+ IpamResourceDiscoveryAssociations []*IpamResourceDiscoveryAssociation `locationName:"ipamResourceDiscoveryAssociationSet" locationNameList:"item" type:"list"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveryAssociationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeIpamResourceDiscoveryAssociationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscoveryAssociations sets the IpamResourceDiscoveryAssociations field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsOutput) SetIpamResourceDiscoveryAssociations(v []*IpamResourceDiscoveryAssociation) *DescribeIpamResourceDiscoveryAssociationsOutput {
+ s.IpamResourceDiscoveryAssociations = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeIpamResourceDiscoveryAssociationsOutput) SetNextToken(v string) *DescribeIpamResourceDiscoveryAssociationsOutput {
+ s.NextToken = &v
+ return s
+}
+
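A minimal sketch of calling the new IPAM resource discovery describe API, assuming the corresponding DescribeIpamResourceDiscoveries client operation is generated elsewhere in this file as part of the same SDK bump; the page size of 5 simply satisfies the minimum enforced by the Validate method above:

package main

import (
	"fmt"
	"log"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// MaxResults below 5 would fail the Validate check defined above.
	input := &ec2.DescribeIpamResourceDiscoveriesInput{MaxResults: aws.Int64(5)}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.DescribeIpamResourceDiscoveries(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d resource discoveries\n", len(out.IpamResourceDiscoveries))
}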
type DescribeIpamScopesInput struct {
_ struct{} `type:"structure"`
@@ -90096,6 +96136,19 @@ type DescribeLaunchTemplateVersionsInput struct {
// The token to request the next page of results.
NextToken *string `type:"string"`
+ // If true, and if a Systems Manager parameter is specified for ImageId, the
+ // AMI ID is displayed in the response for imageId.
+ //
+ // If false, and if a Systems Manager parameter is specified for ImageId, the
+ // parameter is displayed in the response for imageId.
+ //
+ // For more information, see Use a Systems Manager parameter instead of an AMI
+ // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Default: false
+ ResolveAlias *bool `type:"boolean"`
+
// One or more versions of the launch template. Valid values depend on whether
// you are describing a specified launch template (by ID or name) or all launch
// templates in your account.
@@ -90190,6 +96243,12 @@ func (s *DescribeLaunchTemplateVersionsInput) SetNextToken(v string) *DescribeLa
return s
}
+// SetResolveAlias sets the ResolveAlias field's value.
+func (s *DescribeLaunchTemplateVersionsInput) SetResolveAlias(v bool) *DescribeLaunchTemplateVersionsInput {
+ s.ResolveAlias = &v
+ return s
+}
+
// SetVersions sets the Versions field's value.
func (s *DescribeLaunchTemplateVersionsInput) SetVersions(v []*string) *DescribeLaunchTemplateVersionsInput {
s.Versions = v
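A minimal sketch of the new ResolveAlias option on DescribeLaunchTemplateVersions, assuming the vendored service/ec2 package path; the launch template name and version are illustrative placeholders:

package main

import (
	"fmt"
	"log"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// ResolveAlias=true asks EC2 to return the resolved AMI ID when the launch
	// template stores a Systems Manager parameter in ImageId; false (the default)
	// returns the parameter name itself.
	input := (&ec2.DescribeLaunchTemplateVersionsInput{
		LaunchTemplateName: aws.String("my-template"), // placeholder name
		Versions:           []*string{aws.String("$Latest")},
	}).SetResolveAlias(true)

	out, err := svc.DescribeLaunchTemplateVersions(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range out.LaunchTemplateVersions {
		if v.LaunchTemplateData != nil {
			fmt.Println(aws.StringValue(v.LaunchTemplateData.ImageId))
		}
	}
}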
@@ -91515,14 +97574,16 @@ type DescribeNatGatewaysInput struct {
// * vpc-id - The ID of the VPC in which the NAT gateway resides.
Filter []*Filter `locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
// One or more NAT gateway IDs.
NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -91593,8 +97654,8 @@ type DescribeNatGatewaysOutput struct {
// Information about the NAT gateways.
NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -91691,8 +97752,9 @@ type DescribeNetworkAclsInput struct {
// * vpc-id - The ID of the VPC for the network ACL.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
// One or more network ACL IDs.
@@ -91700,7 +97762,8 @@ type DescribeNetworkAclsInput struct {
// Default: Describes all your network ACLs.
NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -91771,8 +97834,8 @@ type DescribeNetworkAclsOutput struct {
// Information about one or more network ACLs.
NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -92256,7 +98319,27 @@ type DescribeNetworkInsightsPathsInput struct {
//
// * destination - The ID of the resource.
//
- // * destination-port - The destination port.
+ // * filter-at-source.source-address - The source IPv4 address at the source.
+ //
+ // * filter-at-source.source-port-range - The source port range at the source.
+ //
+ // * filter-at-source.destination-address - The destination IPv4 address
+ // at the source.
+ //
+ // * filter-at-source.destination-port-range - The destination port range
+ // at the source.
+ //
+ // * filter-at-destination.source-address - The source IPv4 address at the
+ // destination.
+ //
+ // * filter-at-destination.source-port-range - The source port range at the
+ // destination.
+ //
+ // * filter-at-destination.destination-address - The destination IPv4 address
+ // at the destination.
+ //
+ // * filter-at-destination.destination-port-range - The destination port
+ // range at the destination.
//
// * protocol - The protocol.
//
@@ -92533,15 +98616,17 @@ type DescribeNetworkInterfacePermissionsInput struct {
// | EIP-ASSOCIATE).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value. If
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. If
// this parameter is not specified, up to 50 results are returned by default.
+ // For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
// The network interface permission IDs.
NetworkInterfacePermissionIds []*string `locationName:"NetworkInterfacePermissionId" type:"list"`
- // The token to request the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -92607,7 +98692,8 @@ type DescribeNetworkInterfacePermissionsOutput struct {
// The network interface permissions.
NetworkInterfacePermissions []*NetworkInterfacePermission `locationName:"networkInterfacePermissions" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -92762,10 +98848,10 @@ type DescribeNetworkInterfacesInput struct {
// * vpc-id - The ID of the VPC for the network interface.
Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
- // The maximum number of items to return for this request. The request returns
- // a token that you can specify in a subsequent call to get the next set of
- // results. You cannot specify this parameter and the network interface IDs
- // parameter in the same request.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. You
+ // cannot specify this parameter and the network interface IDs parameter in
+ // the same request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
// The network interface IDs.
@@ -92773,7 +98859,8 @@ type DescribeNetworkInterfacesInput struct {
// Default: Describes all your network interfaces.
NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -92838,15 +98925,14 @@ func (s *DescribeNetworkInterfacesInput) SetNextToken(v string) *DescribeNetwork
return s
}
-// Contains the output of DescribeNetworkInterfaces.
type DescribeNetworkInterfacesOutput struct {
_ struct{} `type:"structure"`
// Information about one or more network interfaces.
NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -93472,11 +99558,13 @@ type DescribeReplaceRootVolumeTasksInput struct {
// task was created.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"1" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The ID of the root volume replacement task to view.
@@ -93547,8 +99635,8 @@ func (s *DescribeReplaceRootVolumeTasksInput) SetReplaceRootVolumeTaskIds(v []*s
type DescribeReplaceRootVolumeTasksOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the root volume replacement task.
@@ -94290,11 +100378,13 @@ type DescribeRouteTablesInput struct {
// * vpc-id - The ID of the VPC for the route table.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// One or more route table IDs.
@@ -94368,8 +100458,8 @@ func (s *DescribeRouteTablesInput) SetRouteTableIds(v []*string) *DescribeRouteT
type DescribeRouteTablesOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about one or more route tables.
@@ -94831,13 +100921,14 @@ type DescribeSecurityGroupRulesInput struct {
// the filter value.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another request with the returned NextToken value.
- // This value can be between 5 and 1000. If this parameter is not specified,
- // then all results are returned.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. This
+ // value can be between 5 and 1000. If this parameter is not specified, then
+ // all items are returned. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The IDs of the security group rules.
@@ -94908,8 +100999,8 @@ func (s *DescribeSecurityGroupRulesInput) SetSecurityGroupRuleIds(v []*string) *
type DescribeSecurityGroupRulesOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about security group rules.
@@ -95049,13 +101140,14 @@ type DescribeSecurityGroupsInput struct {
// Default: Describes all of your security groups.
GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another request with the returned NextToken value.
- // This value can be between 5 and 1000. If this parameter is not specified,
- // then all results are returned.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. This
+ // value can be between 5 and 1000. If this parameter is not specified, then
+ // all items are returned. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token to request the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -95129,8 +101221,8 @@ func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGr
type DescribeSecurityGroupsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the security groups.
@@ -95310,11 +101402,13 @@ type DescribeSnapshotTierStatusInput struct {
// | temporary-restore-completed | temporary-restore-failed)
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -95363,8 +101457,8 @@ func (s *DescribeSnapshotTierStatusInput) SetNextToken(v string) *DescribeSnapsh
type DescribeSnapshotTierStatusOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the snapshot's storage tier.
@@ -95450,22 +101544,15 @@ type DescribeSnapshotsInput struct {
// * volume-size - The size of the volume, in GiB.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of snapshot results returned by DescribeSnapshots in paginated
- // output. When this parameter is used, DescribeSnapshots only returns MaxResults
- // results in a single page along with a NextToken response element. The remaining
- // results of the initial request can be seen by sending another DescribeSnapshots
- // request with the returned NextToken value. This value can be between 5 and
- // 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results
- // are returned. If this parameter is not used, then DescribeSnapshots returns
- // all results. You cannot specify this parameter and the snapshot IDs parameter
- // in the same request.
+ // The maximum number of snapshots to return for this request. This value can
+ // be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results
+ // are returned. If this parameter is not used, then the request returns all
+ // snapshots. You cannot specify this parameter and the snapshot IDs parameter
+ // in the same request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The NextToken value returned from a previous paginated DescribeSnapshots
- // request where MaxResults was used and the results exceeded the value of that
- // parameter. Pagination continues from the end of the previous results that
- // returned the NextToken value. This value is null when there are no more results
- // to return.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// Scopes the results to snapshots with the specified owners. You can specify
@@ -95545,10 +101632,8 @@ func (s *DescribeSnapshotsInput) SetSnapshotIds(v []*string) *DescribeSnapshotsI
type DescribeSnapshotsOutput struct {
_ struct{} `type:"structure"`
- // The NextToken value to include in a future DescribeSnapshots request. When
- // the results of a DescribeSnapshots request exceed MaxResults, this value
- // can be used to retrieve the next page of results. This value is null when
- // there are no more results to return.
+ // The token to include in another request to return the next page of snapshots.
+ // This value is null when there are no more snapshots to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the snapshots.
@@ -95662,12 +101747,13 @@ type DescribeSpotFleetInstancesInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The ID of the Spot Fleet request.
@@ -95742,8 +101828,8 @@ type DescribeSpotFleetInstancesOutput struct {
// of date.
ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list"`
- // The token required to retrieve the next set of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The ID of the Spot Fleet request.
@@ -95799,12 +101885,13 @@ type DescribeSpotFleetRequestHistoryInput struct {
// The type of events to describe. By default, all events are described.
EventType *string `locationName:"eventType" type:"string" enum:"EventType"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The ID of the Spot Fleet request.
@@ -95901,11 +101988,11 @@ type DescribeSpotFleetRequestHistoryOutput struct {
// The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
// All records up to this time were retrieved.
//
- // If nextToken indicates that there are more results, this value is not present.
+ // If nextToken indicates that there are more items, this value is not present.
LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp"`
- // The token required to retrieve the next set of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The ID of the Spot Fleet request.
@@ -95973,12 +102060,13 @@ type DescribeSpotFleetRequestsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" type:"integer"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The IDs of the Spot Fleet requests.
@@ -96031,8 +102119,8 @@ func (s *DescribeSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *De
type DescribeSpotFleetRequestsOutput struct {
_ struct{} `type:"structure"`
- // The token required to retrieve the next set of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the configuration of your Spot Fleet.
@@ -96187,13 +102275,13 @@ type DescribeSpotInstanceRequestsInput struct {
// * valid-until - The end date of the request.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return in a single call. Specify a value
- // between 5 and 1000. To retrieve the remaining results, make another call
- // with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token to request the next set of results. This value is null when there
- // are no more results to return.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// One or more Spot Instance request IDs.
@@ -96252,8 +102340,8 @@ func (s *DescribeSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*strin
type DescribeSpotInstanceRequestsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next set of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// One or more Spot Instance requests.
@@ -96330,12 +102418,13 @@ type DescribeSpotPriceHistoryInput struct {
// Filters the results by the specified instance types.
InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `locationName:"nextToken" type:"string"`
// Filters the results by the specified basic product descriptions.
@@ -96422,8 +102511,8 @@ func (s *DescribeSpotPriceHistoryInput) SetStartTime(v time.Time) *DescribeSpotP
type DescribeSpotPriceHistoryOutput struct {
_ struct{} `type:"structure"`
- // The token required to retrieve the next set of results. This value is null
- // or an empty string when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The historical Spot prices.
@@ -96469,13 +102558,13 @@ type DescribeStaleSecurityGroupsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The maximum number of items to return for this request. The request returns
- // a token that you can specify in a subsequent call to get the next set of
- // results.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next set of items to return. (You received this token from
- // a prior call.)
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `min:"1" type:"string"`
// The ID of the VPC.
@@ -96548,8 +102637,8 @@ func (s *DescribeStaleSecurityGroupsInput) SetVpcId(v string) *DescribeStaleSecu
type DescribeStaleSecurityGroupsOutput struct {
_ struct{} `type:"structure"`
- // The token to use when requesting the next set of items. If there are no additional
- // items to return, the string is empty.
+ // The token to include in another request to get the next page of items. If
+ // there are no additional items to return, the string is empty.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the stale security groups.
@@ -96608,13 +102697,16 @@ type DescribeStoreImageTasksInput struct {
// in a request.
ImageIds []*string `locationName:"ImageId" locationNameList:"item" type:"list"`
- // The maximum number of results to return in a single call. To retrieve the
- // remaining results, make another call with the returned NextToken value. This
- // value can be between 1 and 200. You cannot specify this parameter and the
- // ImageIDs parameter in the same call.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ //
+ // You cannot specify this parameter and the ImageIDs parameter in the same
+ // call.
MaxResults *int64 `min:"1" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -96682,8 +102774,8 @@ func (s *DescribeStoreImageTasksInput) SetNextToken(v string) *DescribeStoreImag
type DescribeStoreImageTasksOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The information about the AMI store tasks.
@@ -96815,11 +102907,13 @@ type DescribeSubnetsInput struct {
// * vpc-id - The ID of the VPC for the subnet.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// One or more subnet IDs.
@@ -96892,8 +102986,8 @@ func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput {
type DescribeSubnetsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about one or more subnets.
@@ -96960,12 +103054,13 @@ type DescribeTagsInput struct {
// * value - The tag value.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return in a single call. This value can
- // be between 5 and 1000. To retrieve the remaining results, make another call
- // with the returned NextToken value.
+ // The maximum number of items to return for this request. This value can be
+ // between 5 and 1000. To get the next page of items, make another request with
+ // the token returned in the output. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" type:"integer"`
- // The token to retrieve the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -97014,8 +103109,8 @@ func (s *DescribeTagsInput) SetNextToken(v string) *DescribeTagsInput {
type DescribeTagsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The tags.
@@ -98973,6 +105068,659 @@ func (s *DescribeTrunkInterfaceAssociationsOutput) SetNextToken(v string) *Descr
return s
}
+type DescribeVerifiedAccessEndpointsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters. Filter names and values are case-sensitive.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access endpoints.
+ VerifiedAccessEndpointIds []*string `locationName:"VerifiedAccessEndpointId" locationNameList:"item" type:"list"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ VerifiedAccessGroupId *string `type:"string"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessEndpointsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessEndpointsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVerifiedAccessEndpointsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVerifiedAccessEndpointsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetDryRun(v bool) *DescribeVerifiedAccessEndpointsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetFilters(v []*Filter) *DescribeVerifiedAccessEndpointsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetMaxResults(v int64) *DescribeVerifiedAccessEndpointsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetNextToken(v string) *DescribeVerifiedAccessEndpointsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessEndpointIds sets the VerifiedAccessEndpointIds field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetVerifiedAccessEndpointIds(v []*string) *DescribeVerifiedAccessEndpointsInput {
+ s.VerifiedAccessEndpointIds = v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetVerifiedAccessGroupId(v string) *DescribeVerifiedAccessEndpointsInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *DescribeVerifiedAccessEndpointsInput) SetVerifiedAccessInstanceId(v string) *DescribeVerifiedAccessEndpointsInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type DescribeVerifiedAccessEndpointsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Details of the Amazon Web Services Verified Access endpoints.
+ VerifiedAccessEndpoints []*VerifiedAccessEndpoint `locationName:"verifiedAccessEndpointSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessEndpointsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessEndpointsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessEndpointsOutput) SetNextToken(v string) *DescribeVerifiedAccessEndpointsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessEndpoints sets the VerifiedAccessEndpoints field's value.
+func (s *DescribeVerifiedAccessEndpointsOutput) SetVerifiedAccessEndpoints(v []*VerifiedAccessEndpoint) *DescribeVerifiedAccessEndpointsOutput {
+ s.VerifiedAccessEndpoints = v
+ return s
+}
+
+type DescribeVerifiedAccessGroupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters. Filter names and values are case-sensitive.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access groups.
+ VerifiedAccessGroupIds []*string `locationName:"VerifiedAccessGroupId" locationNameList:"item" type:"list"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessGroupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessGroupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVerifiedAccessGroupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVerifiedAccessGroupsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetDryRun(v bool) *DescribeVerifiedAccessGroupsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetFilters(v []*Filter) *DescribeVerifiedAccessGroupsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetMaxResults(v int64) *DescribeVerifiedAccessGroupsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetNextToken(v string) *DescribeVerifiedAccessGroupsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessGroupIds sets the VerifiedAccessGroupIds field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetVerifiedAccessGroupIds(v []*string) *DescribeVerifiedAccessGroupsInput {
+ s.VerifiedAccessGroupIds = v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *DescribeVerifiedAccessGroupsInput) SetVerifiedAccessInstanceId(v string) *DescribeVerifiedAccessGroupsInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type DescribeVerifiedAccessGroupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Details of the Verified Access groups.
+ VerifiedAccessGroups []*VerifiedAccessGroup `locationName:"verifiedAccessGroupSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessGroupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessGroupsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessGroupsOutput) SetNextToken(v string) *DescribeVerifiedAccessGroupsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessGroups sets the VerifiedAccessGroups field's value.
+func (s *DescribeVerifiedAccessGroupsOutput) SetVerifiedAccessGroups(v []*VerifiedAccessGroup) *DescribeVerifiedAccessGroupsOutput {
+ s.VerifiedAccessGroups = v
+ return s
+}
+
+type DescribeVerifiedAccessInstanceLoggingConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters. Filter names and values are case-sensitive.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access instances.
+ VerifiedAccessInstanceIds []*string `locationName:"VerifiedAccessInstanceId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstanceLoggingConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstanceLoggingConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVerifiedAccessInstanceLoggingConfigurationsInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetDryRun(v bool) *DescribeVerifiedAccessInstanceLoggingConfigurationsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetFilters(v []*Filter) *DescribeVerifiedAccessInstanceLoggingConfigurationsInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetMaxResults(v int64) *DescribeVerifiedAccessInstanceLoggingConfigurationsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetNextToken(v string) *DescribeVerifiedAccessInstanceLoggingConfigurationsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceIds sets the VerifiedAccessInstanceIds field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsInput) SetVerifiedAccessInstanceIds(v []*string) *DescribeVerifiedAccessInstanceLoggingConfigurationsInput {
+ s.VerifiedAccessInstanceIds = v
+ return s
+}
+
+type DescribeVerifiedAccessInstanceLoggingConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The current logging configuration for the Amazon Web Services Verified Access
+ // instances.
+ LoggingConfigurations []*VerifiedAccessInstanceLoggingConfiguration `locationName:"loggingConfigurationSet" locationNameList:"item" type:"list"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoggingConfigurations sets the LoggingConfigurations field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) SetLoggingConfigurations(v []*VerifiedAccessInstanceLoggingConfiguration) *DescribeVerifiedAccessInstanceLoggingConfigurationsOutput {
+ s.LoggingConfigurations = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessInstanceLoggingConfigurationsOutput) SetNextToken(v string) *DescribeVerifiedAccessInstanceLoggingConfigurationsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeVerifiedAccessInstancesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters. Filter names and values are case-sensitive.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access instances.
+ VerifiedAccessInstanceIds []*string `locationName:"VerifiedAccessInstanceId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstancesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstancesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVerifiedAccessInstancesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVerifiedAccessInstancesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVerifiedAccessInstancesInput) SetDryRun(v bool) *DescribeVerifiedAccessInstancesInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVerifiedAccessInstancesInput) SetFilters(v []*Filter) *DescribeVerifiedAccessInstancesInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVerifiedAccessInstancesInput) SetMaxResults(v int64) *DescribeVerifiedAccessInstancesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessInstancesInput) SetNextToken(v string) *DescribeVerifiedAccessInstancesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceIds sets the VerifiedAccessInstanceIds field's value.
+func (s *DescribeVerifiedAccessInstancesInput) SetVerifiedAccessInstanceIds(v []*string) *DescribeVerifiedAccessInstancesInput {
+ s.VerifiedAccessInstanceIds = v
+ return s
+}
+
+type DescribeVerifiedAccessInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Details of the Amazon Web Services Verified Access instances.
+ VerifiedAccessInstances []*VerifiedAccessInstance `locationName:"verifiedAccessInstanceSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessInstancesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessInstancesOutput) SetNextToken(v string) *DescribeVerifiedAccessInstancesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessInstances sets the VerifiedAccessInstances field's value.
+func (s *DescribeVerifiedAccessInstancesOutput) SetVerifiedAccessInstances(v []*VerifiedAccessInstance) *DescribeVerifiedAccessInstancesOutput {
+ s.VerifiedAccessInstances = v
+ return s
+}
+
+type DescribeVerifiedAccessTrustProvidersInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // One or more filters. Filter names and values are case-sensitive.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // The maximum number of results to return with a single call. To retrieve the
+ // remaining results, make another call with the returned nextToken value.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token for the next page of results.
+ NextToken *string `type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access trust providers.
+ VerifiedAccessTrustProviderIds []*string `locationName:"VerifiedAccessTrustProviderId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessTrustProvidersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessTrustProvidersInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVerifiedAccessTrustProvidersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeVerifiedAccessTrustProvidersInput"}
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVerifiedAccessTrustProvidersInput) SetDryRun(v bool) *DescribeVerifiedAccessTrustProvidersInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVerifiedAccessTrustProvidersInput) SetFilters(v []*Filter) *DescribeVerifiedAccessTrustProvidersInput {
+ s.Filters = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVerifiedAccessTrustProvidersInput) SetMaxResults(v int64) *DescribeVerifiedAccessTrustProvidersInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessTrustProvidersInput) SetNextToken(v string) *DescribeVerifiedAccessTrustProvidersInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderIds sets the VerifiedAccessTrustProviderIds field's value.
+func (s *DescribeVerifiedAccessTrustProvidersInput) SetVerifiedAccessTrustProviderIds(v []*string) *DescribeVerifiedAccessTrustProvidersInput {
+ s.VerifiedAccessTrustProviderIds = v
+ return s
+}
+
+type DescribeVerifiedAccessTrustProvidersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token to use to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // Details of the Amazon Web Services Verified Access trust providers.
+ VerifiedAccessTrustProviders []*VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProviderSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessTrustProvidersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeVerifiedAccessTrustProvidersOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVerifiedAccessTrustProvidersOutput) SetNextToken(v string) *DescribeVerifiedAccessTrustProvidersOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviders sets the VerifiedAccessTrustProviders field's value.
+func (s *DescribeVerifiedAccessTrustProvidersOutput) SetVerifiedAccessTrustProviders(v []*VerifiedAccessTrustProvider) *DescribeVerifiedAccessTrustProvidersOutput {
+ s.VerifiedAccessTrustProviders = v
+ return s
+}
+
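
Editorial aside, not part of the generated SDK: a minimal sketch of how a caller might drive the new DescribeVerifiedAccessInstances types added above, assuming the matching operation method on *EC2 that the same generated file provides. The fluent Set* helpers and Validate's MaxResults minimum of 5 come from the code above; the helper name and printing are illustrative.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// listVerifiedAccessInstances exercises the new input/output types: the
// fluent Set* builders, Validate's MaxResults minimum, and the nextToken
// paging contract.
func listVerifiedAccessInstances(svc *ec2.EC2) error {
	input := new(ec2.DescribeVerifiedAccessInstancesInput).SetMaxResults(5)
	if err := input.Validate(); err != nil {
		return err // fires if MaxResults is set below 5
	}
	for {
		out, err := svc.DescribeVerifiedAccessInstances(input)
		if err != nil {
			return err
		}
		for _, vai := range out.VerifiedAccessInstances {
			fmt.Println(aws.StringValue(vai.VerifiedAccessInstanceId))
		}
		if out.NextToken == nil { // null when there are no more results
			return nil
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}
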
type DescribeVolumeAttributeInput struct {
_ struct{} `type:"structure"`
@@ -99136,20 +105884,16 @@ type DescribeVolumeStatusInput struct {
// | insufficient-data).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of volume results returned by DescribeVolumeStatus in
- // paginated output. When this parameter is used, the request only returns MaxResults
- // results in a single page along with a NextToken response element. The remaining
- // results of the initial request can be seen by sending another request with
- // the returned NextToken value. This value can be between 5 and 1,000; if MaxResults
- // is given a value larger than 1,000, only 1,000 results are returned. If this
- // parameter is not used, then DescribeVolumeStatus returns all results. You
- // cannot specify this parameter and the volume IDs parameter in the same request.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. This
+ // value can be between 5 and 1,000; if the value is larger than 1,000, only
+ // 1,000 results are returned. If this parameter is not used, then all items
+ // are returned. You cannot specify this parameter and the volume IDs parameter
+ // in the same request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The NextToken value to include in a future DescribeVolumeStatus request.
- // When the results of the request exceed MaxResults, this value can be used
- // to retrieve the next page of results. This value is null when there are no
- // more results to return.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The IDs of the volumes.
@@ -99209,8 +105953,8 @@ func (s *DescribeVolumeStatusInput) SetVolumeIds(v []*string) *DescribeVolumeSta
type DescribeVolumeStatusOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the status of the volumes.
@@ -99306,21 +106050,15 @@ type DescribeVolumesInput struct {
// | sc1| standard)
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of volume results returned by DescribeVolumes in paginated
- // output. When this parameter is used, DescribeVolumes only returns MaxResults
- // results in a single page along with a NextToken response element. The remaining
- // results of the initial request can be seen by sending another DescribeVolumes
- // request with the returned NextToken value. This value can be between 5 and
- // 500; if MaxResults is given a value larger than 500, only 500 results are
- // returned. If this parameter is not used, then DescribeVolumes returns all
- // results. You cannot specify this parameter and the volume IDs parameter in
- // the same request.
+ // The maximum number of volumes to return for this request. This value can
+ // be between 5 and 500; if you specify a value larger than 500, only 500 items
+ // are returned. If this parameter is not used, then all items are returned.
+ // You cannot specify this parameter and the volume IDs parameter in the same
+ // request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" type:"integer"`
- // The NextToken value returned from a previous paginated DescribeVolumes request
- // where MaxResults was used and the results exceeded the value of that parameter.
- // Pagination continues from the end of the previous results that returned the
- // NextToken value. This value is null when there are no more results to return.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned from the previous request.
NextToken *string `locationName:"nextToken" type:"string"`
// The volume IDs.
@@ -99415,10 +106153,11 @@ type DescribeVolumesModificationsInput struct {
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The maximum number of results (up to a limit of 500) to be returned in a
- // paginated request.
+ // paginated request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The nextToken value returned by a previous paginated request.
+ // The token returned by a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The IDs of the volumes.
@@ -99476,7 +106215,8 @@ func (s *DescribeVolumesModificationsInput) SetVolumeIds(v []*string) *DescribeV
type DescribeVolumesModificationsOutput struct {
_ struct{} `type:"structure"`
- // Token for pagination, null if there are no more results
+ // The token to include in another request to get the next page of items. This
+ // value is null if there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the volume modifications.
@@ -99516,10 +106256,8 @@ func (s *DescribeVolumesModificationsOutput) SetVolumesModifications(v []*Volume
type DescribeVolumesOutput struct {
_ struct{} `type:"structure"`
- // The NextToken value to include in a future DescribeVolumes request. When
- // the results of a DescribeVolumes request exceed MaxResults, this value can
- // be used to retrieve the next page of results. This value is null when there
- // are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the volumes.
@@ -99693,11 +106431,13 @@ func (s *DescribeVpcAttributeOutput) SetVpcId(v string) *DescribeVpcAttributeOut
type DescribeVpcClassicLinkDnsSupportInput struct {
_ struct{} `type:"structure"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
// One or more VPC IDs.
@@ -99759,8 +106499,8 @@ func (s *DescribeVpcClassicLinkDnsSupportInput) SetVpcIds(v []*string) *Describe
type DescribeVpcClassicLinkDnsSupportOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
// Information about the ClassicLink DNS support status of the VPCs.
@@ -99905,7 +106645,7 @@ type DescribeVpcEndpointConnectionNotificationsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * connection-notification-arn - The ARN of the SNS topic for the notification.
//
@@ -99980,7 +106720,7 @@ func (s *DescribeVpcEndpointConnectionNotificationsInput) SetNextToken(v string)
type DescribeVpcEndpointConnectionNotificationsOutput struct {
_ struct{} `type:"structure"`
- // One or more notifications.
+ // The notifications.
ConnectionNotificationSet []*ConnectionNotification `locationName:"connectionNotificationSet" locationNameList:"item" type:"list"`
// The token to use to retrieve the next page of results. This value is null
@@ -100027,7 +106767,7 @@ type DescribeVpcEndpointConnectionsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * ip-address-type - The IP address type (ipv4 | ipv6).
//
@@ -100102,7 +106842,7 @@ type DescribeVpcEndpointConnectionsOutput struct {
// when there are no more results to return.
NextToken *string `locationName:"nextToken" type:"string"`
- // Information about one or more VPC endpoint connections.
+ // Information about the VPC endpoint connections.
VpcEndpointConnections []*VpcEndpointConnection `locationName:"vpcEndpointConnectionSet" locationNameList:"item" type:"list"`
}
@@ -100145,7 +106885,7 @@ type DescribeVpcEndpointServiceConfigurationsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * service-name - The name of the service.
//
@@ -100177,7 +106917,7 @@ type DescribeVpcEndpointServiceConfigurationsInput struct {
// The token to retrieve the next page of results.
NextToken *string `type:"string"`
- // The IDs of one or more services.
+ // The IDs of the endpoint services.
ServiceIds []*string `locationName:"ServiceId" locationNameList:"item" type:"list"`
}
@@ -100236,7 +106976,7 @@ type DescribeVpcEndpointServiceConfigurationsOutput struct {
// when there are no more results to return.
NextToken *string `locationName:"nextToken" type:"string"`
- // Information about one or more services.
+ // Information about the services.
ServiceConfigurations []*ServiceConfiguration `locationName:"serviceConfigurationSet" locationNameList:"item" type:"list"`
}
@@ -100279,7 +107019,7 @@ type DescribeVpcEndpointServicePermissionsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * principal - The ARN of the principal.
//
@@ -100367,7 +107107,7 @@ func (s *DescribeVpcEndpointServicePermissionsInput) SetServiceId(v string) *Des
type DescribeVpcEndpointServicePermissionsOutput struct {
_ struct{} `type:"structure"`
- // Information about one or more allowed principals.
+ // Information about the allowed principals.
AllowedPrincipals []*AllowedPrincipal `locationName:"allowedPrincipals" locationNameList:"item" type:"list"`
// The token to use to retrieve the next page of results. This value is null
@@ -100405,7 +107145,6 @@ func (s *DescribeVpcEndpointServicePermissionsOutput) SetNextToken(v string) *De
return s
}
-// Contains the parameters for DescribeVpcEndpointServices.
type DescribeVpcEndpointServicesInput struct {
_ struct{} `type:"structure"`
@@ -100415,7 +107154,7 @@ type DescribeVpcEndpointServicesInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * owner - The ID or alias of the Amazon Web Services account that owns
// the service.
@@ -100448,7 +107187,7 @@ type DescribeVpcEndpointServicesInput struct {
// a prior call.)
NextToken *string `type:"string"`
- // One or more service names.
+ // The service names.
ServiceNames []*string `locationName:"ServiceName" locationNameList:"item" type:"list"`
}
@@ -100500,7 +107239,6 @@ func (s *DescribeVpcEndpointServicesInput) SetServiceNames(v []*string) *Describ
return s
}
-// Contains the output of DescribeVpcEndpointServices.
type DescribeVpcEndpointServicesOutput struct {
_ struct{} `type:"structure"`
@@ -100511,7 +107249,7 @@ type DescribeVpcEndpointServicesOutput struct {
// Information about the service.
ServiceDetails []*ServiceDetail `locationName:"serviceDetailSet" locationNameList:"item" type:"list"`
- // A list of supported services.
+ // The supported services.
ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"`
}
@@ -100551,7 +107289,6 @@ func (s *DescribeVpcEndpointServicesOutput) SetServiceNames(v []*string) *Descri
return s
}
-// Contains the parameters for DescribeVpcEndpoints.
type DescribeVpcEndpointsInput struct {
_ struct{} `type:"structure"`
@@ -100561,7 +107298,7 @@ type DescribeVpcEndpointsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // One or more filters.
+ // The filters.
//
// * ip-address-type - The IP address type (ipv4 | ipv6).
//
@@ -100599,7 +107336,7 @@ type DescribeVpcEndpointsInput struct {
// a prior call.)
NextToken *string `type:"string"`
- // One or more endpoint IDs.
+ // The IDs of the VPC endpoints.
VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"`
}
@@ -100651,7 +107388,6 @@ func (s *DescribeVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DescribeVpcE
return s
}
-// Contains the output of DescribeVpcEndpoints.
type DescribeVpcEndpointsOutput struct {
_ struct{} `type:"structure"`
@@ -100740,11 +107476,13 @@ type DescribeVpcPeeringConnectionsInput struct {
// * vpc-peering-connection-id - The ID of the VPC peering connection.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// One or more VPC peering connection IDs.
@@ -100817,8 +107555,8 @@ func (s *DescribeVpcPeeringConnectionsInput) SetVpcPeeringConnectionIds(v []*str
type DescribeVpcPeeringConnectionsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the VPC peering connections.
@@ -100913,11 +107651,13 @@ type DescribeVpcsInput struct {
// * vpc-id - The ID of the VPC.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// One or more VPC IDs.
@@ -100990,8 +107730,8 @@ func (s *DescribeVpcsInput) SetVpcIds(v []*string) *DescribeVpcsInput {
type DescribeVpcsOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about one or more VPCs.
@@ -101674,6 +108414,129 @@ func (s DetachNetworkInterfaceOutput) GoString() string {
return s.String()
}
+type DetachVerifiedAccessTrustProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ //
+ // VerifiedAccessTrustProviderId is a required field
+ VerifiedAccessTrustProviderId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachVerifiedAccessTrustProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachVerifiedAccessTrustProviderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachVerifiedAccessTrustProviderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachVerifiedAccessTrustProviderInput"}
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+ if s.VerifiedAccessTrustProviderId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessTrustProviderId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *DetachVerifiedAccessTrustProviderInput) SetClientToken(v string) *DetachVerifiedAccessTrustProviderInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DetachVerifiedAccessTrustProviderInput) SetDryRun(v bool) *DetachVerifiedAccessTrustProviderInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *DetachVerifiedAccessTrustProviderInput) SetVerifiedAccessInstanceId(v string) *DetachVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *DetachVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderId(v string) *DetachVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
+type DetachVerifiedAccessTrustProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access instance.
+ VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"`
+
+ // The Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachVerifiedAccessTrustProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DetachVerifiedAccessTrustProviderOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessInstance sets the VerifiedAccessInstance field's value.
+func (s *DetachVerifiedAccessTrustProviderOutput) SetVerifiedAccessInstance(v *VerifiedAccessInstance) *DetachVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessInstance = v
+ return s
+}
+
+// SetVerifiedAccessTrustProvider sets the VerifiedAccessTrustProvider field's value.
+func (s *DetachVerifiedAccessTrustProviderOutput) SetVerifiedAccessTrustProvider(v *VerifiedAccessTrustProvider) *DetachVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessTrustProvider = v
+ return s
+}
+
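For orientation, here is a minimal sketch of how a caller could exercise the new DetachVerifiedAccessTrustProvider types added above. It assumes the upstream github.com/aws/aws-sdk-go import paths rather than the vendored ones in this repository, and all resource IDs are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Credentials and region come from the environment / shared config.
	svc := ec2.New(session.Must(session.NewSession()))

	// Both IDs are required; Validate reports whichever is missing.
	input := &ec2.DetachVerifiedAccessTrustProviderInput{
		VerifiedAccessInstanceId:      aws.String("vai-0123456789abcdef0"),  // placeholder
		VerifiedAccessTrustProviderId: aws.String("vatp-0123456789abcdef0"), // placeholder
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.DetachVerifiedAccessTrustProvider(input)
	if err != nil {
		log.Fatal(err)
	}
	// The output carries the full instance and trust provider structures.
	fmt.Println(out)
}
```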
type DetachVolumeInput struct {
_ struct{} `type:"structure"`
@@ -101861,6 +108724,39 @@ func (s DetachVpnGatewayOutput) GoString() string {
return s.String()
}
+// Options for an Amazon Web Services Verified Access device-identity based
+// trust provider.
+type DeviceOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the tenant application with the device-identity provider.
+ TenantId *string `locationName:"tenantId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeviceOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeviceOptions) GoString() string {
+ return s.String()
+}
+
+// SetTenantId sets the TenantId field's value.
+func (s *DeviceOptions) SetTenantId(v string) *DeviceOptions {
+ s.TenantId = &v
+ return s
+}
+
// Describes a DHCP configuration option.
type DhcpConfiguration struct {
_ struct{} `type:"structure"`
@@ -102362,7 +109258,8 @@ type DisableFastLaunchOutput struct {
// snapshots.
LaunchTemplate *FastLaunchLaunchTemplateSpecificationResponse `locationName:"launchTemplate" type:"structure"`
- // The maximum number of parallel instances to launch for creating resources.
+ // The maximum number of instances that Amazon EC2 can launch at the same time
+ // to create pre-provisioned snapshots for Windows faster launching.
MaxParallelLaunches *int64 `locationName:"maxParallelLaunches" type:"integer"`
// The owner of the Windows AMI for which faster launching was turned off.
@@ -103615,7 +110512,9 @@ type DisassociateEnclaveCertificateIamRoleInput struct {
_ struct{} `type:"structure"`
// The ARN of the ACM certificate from which to disassociate the IAM role.
- CertificateArn *string `min:"1" type:"string"`
+ //
+ // CertificateArn is a required field
+ CertificateArn *string `type:"string" required:"true"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -103624,7 +110523,9 @@ type DisassociateEnclaveCertificateIamRoleInput struct {
DryRun *bool `type:"boolean"`
// The ARN of the IAM role to disassociate.
- RoleArn *string `min:"1" type:"string"`
+ //
+ // RoleArn is a required field
+ RoleArn *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -103648,11 +110549,11 @@ func (s DisassociateEnclaveCertificateIamRoleInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *DisassociateEnclaveCertificateIamRoleInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DisassociateEnclaveCertificateIamRoleInput"}
- if s.CertificateArn != nil && len(*s.CertificateArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1))
+ if s.CertificateArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateArn"))
}
- if s.RoleArn != nil && len(*s.RoleArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
}
if invalidParams.Len() > 0 {
@@ -103890,6 +110791,221 @@ func (s *DisassociateInstanceEventWindowOutput) SetInstanceEventWindow(v *Instan
return s
}
+type DisassociateIpamResourceDiscoveryInput struct {
+ _ struct{} `type:"structure"`
+
+ // A check for whether you have the required permissions for the action without
+ // actually making the request and provides an error response. If you have the
+ // required permissions, the error response is DryRunOperation. Otherwise, it
+ // is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // A resource discovery association ID.
+ //
+ // IpamResourceDiscoveryAssociationId is a required field
+ IpamResourceDiscoveryAssociationId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateIpamResourceDiscoveryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateIpamResourceDiscoveryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateIpamResourceDiscoveryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateIpamResourceDiscoveryInput"}
+ if s.IpamResourceDiscoveryAssociationId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryAssociationId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisassociateIpamResourceDiscoveryInput) SetDryRun(v bool) *DisassociateIpamResourceDiscoveryInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryAssociationId sets the IpamResourceDiscoveryAssociationId field's value.
+func (s *DisassociateIpamResourceDiscoveryInput) SetIpamResourceDiscoveryAssociationId(v string) *DisassociateIpamResourceDiscoveryInput {
+ s.IpamResourceDiscoveryAssociationId = &v
+ return s
+}
+
+type DisassociateIpamResourceDiscoveryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A resource discovery association.
+ IpamResourceDiscoveryAssociation *IpamResourceDiscoveryAssociation `locationName:"ipamResourceDiscoveryAssociation" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateIpamResourceDiscoveryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateIpamResourceDiscoveryOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscoveryAssociation sets the IpamResourceDiscoveryAssociation field's value.
+func (s *DisassociateIpamResourceDiscoveryOutput) SetIpamResourceDiscoveryAssociation(v *IpamResourceDiscoveryAssociation) *DisassociateIpamResourceDiscoveryOutput {
+ s.IpamResourceDiscoveryAssociation = v
+ return s
+}
+
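The DryRun field documented above follows the usual EC2 convention: a dry run always returns an error, either DryRunOperation (permissions are sufficient) or UnauthorizedOperation. A hedged sketch of that permission check for the new DisassociateIpamResourceDiscovery call, again with upstream import paths and a placeholder association ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// With DryRun set, the call never disassociates anything; it only checks permissions.
	_, err := svc.DisassociateIpamResourceDiscovery(&ec2.DisassociateIpamResourceDiscoveryInput{
		DryRun:                             aws.Bool(true),
		IpamResourceDiscoveryAssociationId: aws.String("ipam-res-disco-assoc-0123456789abcdef0"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "DryRunOperation":
			fmt.Println("permissions OK; rerun without DryRun to disassociate")
		case "UnauthorizedOperation":
			fmt.Println("caller lacks the required permissions")
		default:
			log.Fatal(aerr)
		}
	}
}
```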
+type DisassociateNatGatewayAddressInput struct {
+ _ struct{} `type:"structure"`
+
+ // The association IDs of EIPs that have been associated with the NAT gateway.
+ //
+ // AssociationIds is a required field
+ AssociationIds []*string `locationName:"AssociationId" locationNameList:"item" type:"list" required:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The maximum amount of time to wait (in seconds) before forcibly releasing
+ // the IP addresses if connections are still in progress. Default value is 350
+ // seconds.
+ MaxDrainDurationSeconds *int64 `min:"1" type:"integer"`
+
+ // The NAT gateway ID.
+ //
+ // NatGatewayId is a required field
+ NatGatewayId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateNatGatewayAddressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateNatGatewayAddressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateNatGatewayAddressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateNatGatewayAddressInput"}
+ if s.AssociationIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("AssociationIds"))
+ }
+ if s.MaxDrainDurationSeconds != nil && *s.MaxDrainDurationSeconds < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxDrainDurationSeconds", 1))
+ }
+ if s.NatGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("NatGatewayId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssociationIds sets the AssociationIds field's value.
+func (s *DisassociateNatGatewayAddressInput) SetAssociationIds(v []*string) *DisassociateNatGatewayAddressInput {
+ s.AssociationIds = v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisassociateNatGatewayAddressInput) SetDryRun(v bool) *DisassociateNatGatewayAddressInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetMaxDrainDurationSeconds sets the MaxDrainDurationSeconds field's value.
+func (s *DisassociateNatGatewayAddressInput) SetMaxDrainDurationSeconds(v int64) *DisassociateNatGatewayAddressInput {
+ s.MaxDrainDurationSeconds = &v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *DisassociateNatGatewayAddressInput) SetNatGatewayId(v string) *DisassociateNatGatewayAddressInput {
+ s.NatGatewayId = &v
+ return s
+}
+
+type DisassociateNatGatewayAddressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the NAT gateway IP addresses.
+ NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
+
+ // The NAT gateway ID.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateNatGatewayAddressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisassociateNatGatewayAddressOutput) GoString() string {
+ return s.String()
+}
+
+// SetNatGatewayAddresses sets the NatGatewayAddresses field's value.
+func (s *DisassociateNatGatewayAddressOutput) SetNatGatewayAddresses(v []*NatGatewayAddress) *DisassociateNatGatewayAddressOutput {
+ s.NatGatewayAddresses = v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *DisassociateNatGatewayAddressOutput) SetNatGatewayId(v string) *DisassociateNatGatewayAddressOutput {
+ s.NatGatewayId = &v
+ return s
+}
+
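A sketch of how DisassociateNatGatewayAddressInput might be populated, including the optional MaxDrainDurationSeconds drain window described above. IDs are placeholders and import paths assume the upstream SDK module.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DisassociateNatGatewayAddress(&ec2.DisassociateNatGatewayAddressInput{
		NatGatewayId:   aws.String("nat-0123456789abcdef0"),                      // placeholder
		AssociationIds: aws.StringSlice([]string{"eipassoc-0123456789abcdef0"}), // placeholder EIP association
		// Wait up to 4 minutes for in-flight connections before the addresses are released
		// (the service default is 350 seconds).
		MaxDrainDurationSeconds: aws.Int64(240),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Print the addresses still reported for the NAT gateway.
	for _, addr := range out.NatGatewayAddresses {
		fmt.Println(aws.StringValue(addr.PublicIp))
	}
}
```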
type DisassociateRouteTableInput struct {
_ struct{} `type:"structure"`
@@ -104067,13 +111183,19 @@ type DisassociateTransitGatewayMulticastDomainInput struct {
DryRun *bool `type:"boolean"`
// The IDs of the subnets.
- SubnetIds []*string `locationNameList:"item" type:"list"`
+ //
+ // SubnetIds is a required field
+ SubnetIds []*string `locationNameList:"item" type:"list" required:"true"`
// The ID of the attachment.
- TransitGatewayAttachmentId *string `type:"string"`
+ //
+ // TransitGatewayAttachmentId is a required field
+ TransitGatewayAttachmentId *string `type:"string" required:"true"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -104094,6 +111216,25 @@ func (s DisassociateTransitGatewayMulticastDomainInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateTransitGatewayMulticastDomainInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateTransitGatewayMulticastDomainInput"}
+ if s.SubnetIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
+ }
+ if s.TransitGatewayAttachmentId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId"))
+ }
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetDryRun sets the DryRun field's value.
func (s *DisassociateTransitGatewayMulticastDomainInput) SetDryRun(v bool) *DisassociateTransitGatewayMulticastDomainInput {
s.DryRun = &v
@@ -104719,8 +111860,12 @@ type DiskImageDetail struct {
// For information about the import manifest referenced by this API action,
// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
//
+ // ImportManifestUrl is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by DiskImageDetail's
+ // String and GoString methods.
+ //
// ImportManifestUrl is a required field
- ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"`
+ ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation.
@@ -104916,6 +112061,9 @@ type DnsOptions struct {
// The DNS records created for the endpoint.
DnsRecordIpType *string `locationName:"dnsRecordIpType" type:"string" enum:"DnsRecordIpType"`
+
+ // Indicates whether to enable private DNS only for inbound endpoints.
+ PrivateDnsOnlyForInboundResolverEndpoint *bool `locationName:"privateDnsOnlyForInboundResolverEndpoint" type:"boolean"`
}
// String returns the string representation.
@@ -104942,12 +112090,24 @@ func (s *DnsOptions) SetDnsRecordIpType(v string) *DnsOptions {
return s
}
+// SetPrivateDnsOnlyForInboundResolverEndpoint sets the PrivateDnsOnlyForInboundResolverEndpoint field's value.
+func (s *DnsOptions) SetPrivateDnsOnlyForInboundResolverEndpoint(v bool) *DnsOptions {
+ s.PrivateDnsOnlyForInboundResolverEndpoint = &v
+ return s
+}
+
// Describes the DNS options for an endpoint.
type DnsOptionsSpecification struct {
_ struct{} `type:"structure"`
// The DNS records created for the endpoint.
DnsRecordIpType *string `type:"string" enum:"DnsRecordIpType"`
+
+ // Indicates whether to enable private DNS only for inbound endpoints. This
+ // option is available only for services that support both gateway and interface
+ // endpoints. It routes traffic that originates from the VPC to the gateway
+ // endpoint and traffic that originates from on-premises to the interface endpoint.
+ PrivateDnsOnlyForInboundResolverEndpoint *bool `type:"boolean"`
}
// String returns the string representation.
@@ -104974,6 +112134,12 @@ func (s *DnsOptionsSpecification) SetDnsRecordIpType(v string) *DnsOptionsSpecif
return s
}
+// SetPrivateDnsOnlyForInboundResolverEndpoint sets the PrivateDnsOnlyForInboundResolverEndpoint field's value.
+func (s *DnsOptionsSpecification) SetPrivateDnsOnlyForInboundResolverEndpoint(v bool) *DnsOptionsSpecification {
+ s.PrivateDnsOnlyForInboundResolverEndpoint = &v
+ return s
+}
+
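The new PrivateDnsOnlyForInboundResolverEndpoint flag is supplied through DnsOptionsSpecification, for example on ModifyVpcEndpoint. A hedged sketch, assuming upstream import paths and a placeholder endpoint ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyVpcEndpoint(&ec2.ModifyVpcEndpointInput{
		VpcEndpointId:     aws.String("vpce-0123456789abcdef0"), // placeholder interface endpoint
		PrivateDnsEnabled: aws.Bool(true),
		DnsOptions: &ec2.DnsOptionsSpecification{
			// Route traffic from inside the VPC to the gateway endpoint and
			// on-premises traffic to the interface endpoint, per the field doc above.
			PrivateDnsOnlyForInboundResolverEndpoint: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```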
// Information about the DNS server to be used.
type DnsServersOptionsModifyStructure struct {
_ struct{} `type:"structure"`
@@ -105040,7 +112206,32 @@ type EbsBlockDevice struct {
// Encrypted volumes can only be attached to instances that support Amazon EBS
// encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances).
//
- // This parameter is not returned by DescribeImageAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImageAttribute.html).
+ // This parameter is not returned by DescribeImageAttribute.
+ //
+ // For CreateImage and RegisterImage, whether you can include this parameter,
+ // and the allowed values, differ depending on the type of block device mapping
+ // you are creating.
+ //
+ // * If you are creating a block device mapping for a new (empty) volume,
+ // you can include this parameter, and specify either true for an encrypted
+ // volume, or false for an unencrypted volume. If you omit this parameter,
+ // it defaults to false (unencrypted).
+ //
+ // * If you are creating a block device mapping from an existing encrypted
+ // or unencrypted snapshot, you must omit this parameter. If you include
+ // this parameter, the request will fail, regardless of the value that you
+ // specify.
+ //
+ // * If you are creating a block device mapping from an existing unencrypted
+ // volume, you can include this parameter, but you must specify false. If
+ // you specify true, the request will fail. In this case, we recommend that
+ // you omit the parameter.
+ //
+ // * If you are creating a block device mapping from an existing encrypted
+ // volume, you can include this parameter, and specify either true or false.
+ // However, if you specify false, the parameter is ignored and the block
+ // device mapping is always encrypted. In this case, we recommend that you
+ // omit the parameter.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
// The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes,
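To make the Encrypted rules above concrete, here is a small sketch that builds two block device mappings: one for a new empty volume, where Encrypted may be set explicitly, and one from an existing snapshot, where Encrypted must be omitted so the snapshot's own encryption state carries over. Import paths assume the upstream SDK module and the snapshot ID is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// New (empty) volume: Encrypted can be specified; omitting it defaults to unencrypted.
	newVolume := &ec2.BlockDeviceMapping{
		DeviceName: aws.String("/dev/sdf"),
		Ebs: &ec2.EbsBlockDevice{
			Encrypted:           aws.Bool(true),
			VolumeSize:          aws.Int64(20), // GiB
			VolumeType:          aws.String("gp3"),
			DeleteOnTermination: aws.Bool(true),
		},
	}

	// Mapping from an existing snapshot: Encrypted is intentionally left unset,
	// because including it would make the request fail per the rules above.
	fromSnapshot := &ec2.BlockDeviceMapping{
		DeviceName: aws.String("/dev/sdg"),
		Ebs: &ec2.EbsBlockDevice{
			SnapshotId: aws.String("snap-0123456789abcdef0"), // placeholder
		},
	}

	fmt.Println(newVolume, fromSnapshot)
}
```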
@@ -106273,8 +113464,9 @@ type EnableFastLaunchInput struct {
// the launch template, but not both.
LaunchTemplate *FastLaunchLaunchTemplateSpecificationRequest `type:"structure"`
- // The maximum number of parallel instances to launch for creating resources.
- // Value must be 6 or greater.
+ // The maximum number of instances that Amazon EC2 can launch at the same time
+ // to create pre-provisioned snapshots for Windows faster launching. Value must
+ // be 6 or greater.
MaxParallelLaunches *int64 `type:"integer"`
// The type of resource to use for pre-provisioning the Windows AMI for faster
@@ -106370,7 +113562,8 @@ type EnableFastLaunchOutput struct {
// snapshots.
LaunchTemplate *FastLaunchLaunchTemplateSpecificationResponse `locationName:"launchTemplate" type:"structure"`
- // The maximum number of parallel instances to launch for creating resources.
+ // The maximum number of instances that Amazon EC2 can launch at the same time
+ // to create pre-provisioned snapshots for Windows faster launching.
MaxParallelLaunches *int64 `locationName:"maxParallelLaunches" type:"integer"`
// The owner ID for the Windows AMI for which faster launching was enabled.
@@ -106380,9 +113573,9 @@ type EnableFastLaunchOutput struct {
// for faster launching.
ResourceType *string `locationName:"resourceType" type:"string" enum:"FastLaunchResourceType"`
- // The configuration settings that were defined for creating and managing the
- // pre-provisioned snapshots for faster launching of the Windows AMI. This property
- // is returned when the associated resourceType is snapshot.
+ // Settings to create and manage the pre-provisioned snapshots that Amazon EC2
+ // uses for faster launches from the Windows AMI. This property is returned
+ // when the associated resourceType is snapshot.
SnapshotConfiguration *FastLaunchSnapshotConfigurationResponse `locationName:"snapshotConfiguration" type:"structure"`
// The current state of faster launching for the specified Windows AMI.
@@ -107039,6 +114232,10 @@ func (s *EnableIpamOrganizationAdminAccountOutput) SetSuccess(v bool) *EnableIpa
type EnableReachabilityAnalyzerOrganizationSharingInput struct {
_ struct{} `type:"structure"`
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
}
@@ -107069,6 +114266,7 @@ func (s *EnableReachabilityAnalyzerOrganizationSharingInput) SetDryRun(v bool) *
type EnableReachabilityAnalyzerOrganizationSharingOutput struct {
_ struct{} `type:"structure"`
+ // Returns true if the request succeeds; otherwise, returns an error.
ReturnValue *bool `locationName:"returnValue" type:"boolean"`
}
@@ -107859,6 +115057,12 @@ type Explanation struct {
// The explanation code.
ExplanationCode *string `locationName:"explanationCode" type:"string"`
+ // The Network Firewall stateful rule.
+ FirewallStatefulRule *FirewallStatefulRule `locationName:"firewallStatefulRule" type:"structure"`
+
+ // The Network Firewall stateless rule.
+ FirewallStatelessRule *FirewallStatelessRule `locationName:"firewallStatelessRule" type:"structure"`
+
// The route table.
IngressRouteTable *AnalysisComponent `locationName:"ingressRouteTable" type:"structure"`
@@ -108082,6 +115286,18 @@ func (s *Explanation) SetExplanationCode(v string) *Explanation {
return s
}
+// SetFirewallStatefulRule sets the FirewallStatefulRule field's value.
+func (s *Explanation) SetFirewallStatefulRule(v *FirewallStatefulRule) *Explanation {
+ s.FirewallStatefulRule = v
+ return s
+}
+
+// SetFirewallStatelessRule sets the FirewallStatelessRule field's value.
+func (s *Explanation) SetFirewallStatelessRule(v *FirewallStatelessRule) *Explanation {
+ s.FirewallStatelessRule = v
+ return s
+}
+
// SetIngressRouteTable sets the IngressRouteTable field's value.
func (s *Explanation) SetIngressRouteTable(v *AnalysisComponent) *Explanation {
s.IngressRouteTable = v
@@ -109667,6 +116883,237 @@ func (s *Filter) SetValues(v []*string) *Filter {
return s
}
+// Describes a port range.
+type FilterPortRange struct {
+ _ struct{} `type:"structure"`
+
+ // The first port in the range.
+ FromPort *int64 `locationName:"fromPort" type:"integer"`
+
+ // The last port in the range.
+ ToPort *int64 `locationName:"toPort" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FilterPortRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FilterPortRange) GoString() string {
+ return s.String()
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *FilterPortRange) SetFromPort(v int64) *FilterPortRange {
+ s.FromPort = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *FilterPortRange) SetToPort(v int64) *FilterPortRange {
+ s.ToPort = &v
+ return s
+}
+
+// Describes a stateful rule.
+type FirewallStatefulRule struct {
+ _ struct{} `type:"structure"`
+
+ // The destination ports.
+ DestinationPorts []*PortRange `locationName:"destinationPortSet" locationNameList:"item" type:"list"`
+
+ // The destination IP addresses, in CIDR notation.
+ Destinations []*string `locationName:"destinationSet" locationNameList:"item" type:"list"`
+
+ // The direction. The possible values are FORWARD and ANY.
+ Direction *string `locationName:"direction" type:"string"`
+
+ // The protocol.
+ Protocol *string `locationName:"protocol" type:"string"`
+
+ // The rule action. The possible values are pass, drop, and alert.
+ RuleAction *string `locationName:"ruleAction" type:"string"`
+
+ // The ARN of the stateful rule group.
+ RuleGroupArn *string `locationName:"ruleGroupArn" min:"1" type:"string"`
+
+ // The source ports.
+ SourcePorts []*PortRange `locationName:"sourcePortSet" locationNameList:"item" type:"list"`
+
+ // The source IP addresses, in CIDR notation.
+ Sources []*string `locationName:"sourceSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FirewallStatefulRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FirewallStatefulRule) GoString() string {
+ return s.String()
+}
+
+// SetDestinationPorts sets the DestinationPorts field's value.
+func (s *FirewallStatefulRule) SetDestinationPorts(v []*PortRange) *FirewallStatefulRule {
+ s.DestinationPorts = v
+ return s
+}
+
+// SetDestinations sets the Destinations field's value.
+func (s *FirewallStatefulRule) SetDestinations(v []*string) *FirewallStatefulRule {
+ s.Destinations = v
+ return s
+}
+
+// SetDirection sets the Direction field's value.
+func (s *FirewallStatefulRule) SetDirection(v string) *FirewallStatefulRule {
+ s.Direction = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *FirewallStatefulRule) SetProtocol(v string) *FirewallStatefulRule {
+ s.Protocol = &v
+ return s
+}
+
+// SetRuleAction sets the RuleAction field's value.
+func (s *FirewallStatefulRule) SetRuleAction(v string) *FirewallStatefulRule {
+ s.RuleAction = &v
+ return s
+}
+
+// SetRuleGroupArn sets the RuleGroupArn field's value.
+func (s *FirewallStatefulRule) SetRuleGroupArn(v string) *FirewallStatefulRule {
+ s.RuleGroupArn = &v
+ return s
+}
+
+// SetSourcePorts sets the SourcePorts field's value.
+func (s *FirewallStatefulRule) SetSourcePorts(v []*PortRange) *FirewallStatefulRule {
+ s.SourcePorts = v
+ return s
+}
+
+// SetSources sets the Sources field's value.
+func (s *FirewallStatefulRule) SetSources(v []*string) *FirewallStatefulRule {
+ s.Sources = v
+ return s
+}
+
+// Describes a stateless rule.
+type FirewallStatelessRule struct {
+ _ struct{} `type:"structure"`
+
+ // The destination ports.
+ DestinationPorts []*PortRange `locationName:"destinationPortSet" locationNameList:"item" type:"list"`
+
+ // The destination IP addresses, in CIDR notation.
+ Destinations []*string `locationName:"destinationSet" locationNameList:"item" type:"list"`
+
+ // The rule priority.
+ Priority *int64 `locationName:"priority" type:"integer"`
+
+ // The protocols.
+ Protocols []*int64 `locationName:"protocolSet" locationNameList:"item" type:"list"`
+
+ // The rule action. The possible values are pass, drop, and forward_to_site.
+ RuleAction *string `locationName:"ruleAction" type:"string"`
+
+ // The ARN of the stateless rule group.
+ RuleGroupArn *string `locationName:"ruleGroupArn" min:"1" type:"string"`
+
+ // The source ports.
+ SourcePorts []*PortRange `locationName:"sourcePortSet" locationNameList:"item" type:"list"`
+
+ // The source IP addresses, in CIDR notation.
+ Sources []*string `locationName:"sourceSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FirewallStatelessRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FirewallStatelessRule) GoString() string {
+ return s.String()
+}
+
+// SetDestinationPorts sets the DestinationPorts field's value.
+func (s *FirewallStatelessRule) SetDestinationPorts(v []*PortRange) *FirewallStatelessRule {
+ s.DestinationPorts = v
+ return s
+}
+
+// SetDestinations sets the Destinations field's value.
+func (s *FirewallStatelessRule) SetDestinations(v []*string) *FirewallStatelessRule {
+ s.Destinations = v
+ return s
+}
+
+// SetPriority sets the Priority field's value.
+func (s *FirewallStatelessRule) SetPriority(v int64) *FirewallStatelessRule {
+ s.Priority = &v
+ return s
+}
+
+// SetProtocols sets the Protocols field's value.
+func (s *FirewallStatelessRule) SetProtocols(v []*int64) *FirewallStatelessRule {
+ s.Protocols = v
+ return s
+}
+
+// SetRuleAction sets the RuleAction field's value.
+func (s *FirewallStatelessRule) SetRuleAction(v string) *FirewallStatelessRule {
+ s.RuleAction = &v
+ return s
+}
+
+// SetRuleGroupArn sets the RuleGroupArn field's value.
+func (s *FirewallStatelessRule) SetRuleGroupArn(v string) *FirewallStatelessRule {
+ s.RuleGroupArn = &v
+ return s
+}
+
+// SetSourcePorts sets the SourcePorts field's value.
+func (s *FirewallStatelessRule) SetSourcePorts(v []*PortRange) *FirewallStatelessRule {
+ s.SourcePorts = v
+ return s
+}
+
+// SetSources sets the Sources field's value.
+func (s *FirewallStatelessRule) SetSources(v []*string) *FirewallStatelessRule {
+ s.Sources = v
+ return s
+}
+
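The new FirewallStatefulRule and FirewallStatelessRule structures surface Network Firewall rules inside Reachability Analyzer explanations. A sketch of reading them back from DescribeNetworkInsightsAnalyses, assuming upstream import paths and a placeholder analysis ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DescribeNetworkInsightsAnalyses(&ec2.DescribeNetworkInsightsAnalysesInput{
		NetworkInsightsAnalysisIds: aws.StringSlice([]string{"nia-0123456789abcdef0"}), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, analysis := range out.NetworkInsightsAnalyses {
		for _, ex := range analysis.Explanations {
			// Either field may be nil when the explanation does not involve a firewall rule.
			if r := ex.FirewallStatefulRule; r != nil {
				fmt.Printf("stateful rule %s action=%s\n",
					aws.StringValue(r.RuleGroupArn), aws.StringValue(r.RuleAction))
			}
			if r := ex.FirewallStatelessRule; r != nil {
				fmt.Printf("stateless rule priority=%d action=%s\n",
					aws.Int64Value(r.Priority), aws.StringValue(r.RuleAction))
			}
		}
	}
}
```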
// Information about a Capacity Reservation in a Capacity Reservation Fleet.
type FleetCapacityReservation struct {
_ struct{} `type:"structure"`
@@ -109828,6 +117275,8 @@ type FleetData struct {
// Indicates whether running instances should be terminated if the target capacity
// of the EC2 Fleet is decreased below the current size of the EC2 Fleet.
+ //
+ // Supported only for fleets of type maintain.
ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"FleetExcessCapacityTerminationPolicy"`
// The ID of the EC2 Fleet.
@@ -110173,6 +117622,8 @@ type FleetLaunchTemplateOverrides struct {
// The instance type.
//
+ // mac1.metal is not supported as a launch template override.
+ //
// If you specify InstanceType, you can't specify InstanceRequirements.
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
@@ -110303,6 +117754,8 @@ type FleetLaunchTemplateOverridesRequest struct {
// The instance type.
//
+ // mac1.metal is not supported as a launch template override.
+ //
// If you specify InstanceType, you can't specify InstanceRequirements.
InstanceType *string `type:"string" enum:"InstanceType"`
@@ -111076,6 +118529,9 @@ type FpgaImage struct {
// The FPGA image identifier (AFI ID).
FpgaImageId *string `locationName:"fpgaImageId" type:"string"`
+ // The instance types supported by the AFI.
+ InstanceTypes []*string `locationName:"instanceTypes" locationNameList:"item" type:"list"`
+
// The name of the AFI.
Name *string `locationName:"name" type:"string"`
@@ -111156,6 +118612,12 @@ func (s *FpgaImage) SetFpgaImageId(v string) *FpgaImage {
return s
}
+// SetInstanceTypes sets the InstanceTypes field's value.
+func (s *FpgaImage) SetInstanceTypes(v []*string) *FpgaImage {
+ s.InstanceTypes = v
+ return s
+}
+
// SetName sets the Name field's value.
func (s *FpgaImage) SetName(v string) *FpgaImage {
s.Name = &v
@@ -111380,7 +118842,9 @@ type GetAssociatedEnclaveCertificateIamRolesInput struct {
// The ARN of the ACM certificate for which to view the associated IAM roles,
// encryption keys, and Amazon S3 object information.
- CertificateArn *string `min:"1" type:"string"`
+ //
+ // CertificateArn is a required field
+ CertificateArn *string `type:"string" required:"true"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -111410,8 +118874,8 @@ func (s GetAssociatedEnclaveCertificateIamRolesInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetAssociatedEnclaveCertificateIamRolesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetAssociatedEnclaveCertificateIamRolesInput"}
- if s.CertificateArn != nil && len(*s.CertificateArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1))
+ if s.CertificateArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("CertificateArn"))
}
if invalidParams.Len() > 0 {
@@ -112616,7 +120080,9 @@ func (s *GetFlowLogsIntegrationTemplateOutput) SetResult(v string) *GetFlowLogsI
type GetGroupsForCapacityReservationInput struct {
_ struct{} `type:"structure"`
- // The ID of the Capacity Reservation.
+ // The ID of the Capacity Reservation. If you specify a Capacity Reservation
+ // that is shared with you, the operation returns only Capacity Reservation
+ // groups that you own.
//
// CapacityReservationId is a required field
CapacityReservationId *string `type:"string" required:"true"`
@@ -112876,12 +120342,13 @@ type GetInstanceTypesFromInstanceRequirementsInput struct {
// InstanceRequirements is a required field
InstanceRequirements *InstanceRequirementsRequest `type:"structure" required:"true"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The virtualization type.
@@ -112974,7 +120441,8 @@ type GetInstanceTypesFromInstanceRequirementsOutput struct {
// The instance types with the specified instance attributes.
InstanceTypes []*InstanceTypeInfoFromInstanceRequirements `locationName:"instanceTypeSet" locationNameList:"item" type:"list"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
@@ -113274,6 +120742,295 @@ func (s *GetIpamAddressHistoryOutput) SetNextToken(v string) *GetIpamAddressHist
return s
}
+type GetIpamDiscoveredAccountsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Region that the account information is returned from.
+ //
+ // DiscoveryRegion is a required field
+ DiscoveryRegion *string `type:"string" required:"true"`
+
+ // A check for whether you have the required permissions for the action without
+ // actually making the request and provides an error response. If you have the
+ // required permissions, the error response is DryRunOperation. Otherwise, it
+ // is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // Discovered account filters.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // A resource discovery ID.
+ //
+ // IpamResourceDiscoveryId is a required field
+ IpamResourceDiscoveryId *string `type:"string" required:"true"`
+
+ // The maximum number of discovered accounts to return in one page of results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredAccountsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredAccountsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetIpamDiscoveredAccountsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetIpamDiscoveredAccountsInput"}
+ if s.DiscoveryRegion == nil {
+ invalidParams.Add(request.NewErrParamRequired("DiscoveryRegion"))
+ }
+ if s.IpamResourceDiscoveryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryId"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDiscoveryRegion sets the DiscoveryRegion field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetDiscoveryRegion(v string) *GetIpamDiscoveredAccountsInput {
+ s.DiscoveryRegion = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetDryRun(v bool) *GetIpamDiscoveredAccountsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetFilters(v []*Filter) *GetIpamDiscoveredAccountsInput {
+ s.Filters = v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetIpamResourceDiscoveryId(v string) *GetIpamDiscoveredAccountsInput {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetMaxResults(v int64) *GetIpamDiscoveredAccountsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetIpamDiscoveredAccountsInput) SetNextToken(v string) *GetIpamDiscoveredAccountsInput {
+ s.NextToken = &v
+ return s
+}
+
+type GetIpamDiscoveredAccountsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Discovered accounts.
+ IpamDiscoveredAccounts []*IpamDiscoveredAccount `locationName:"ipamDiscoveredAccountSet" locationNameList:"item" type:"list"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredAccountsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredAccountsOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamDiscoveredAccounts sets the IpamDiscoveredAccounts field's value.
+func (s *GetIpamDiscoveredAccountsOutput) SetIpamDiscoveredAccounts(v []*IpamDiscoveredAccount) *GetIpamDiscoveredAccountsOutput {
+ s.IpamDiscoveredAccounts = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetIpamDiscoveredAccountsOutput) SetNextToken(v string) *GetIpamDiscoveredAccountsOutput {
+ s.NextToken = &v
+ return s
+}
+
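The MaxResults/NextToken pair on GetIpamDiscoveredAccounts follows the pagination pattern described in the updated comments elsewhere in this file: request a page, then pass the returned token back until it comes back nil. A minimal loop, assuming upstream import paths and placeholder IDs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.GetIpamDiscoveredAccountsInput{
		IpamResourceDiscoveryId: aws.String("ipam-res-disco-0123456789abcdef0"), // placeholder
		DiscoveryRegion:         aws.String("us-east-1"),
		MaxResults:              aws.Int64(5), // minimum allowed page size
	}
	for {
		page, err := svc.GetIpamDiscoveredAccounts(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, acct := range page.IpamDiscoveredAccounts {
			fmt.Println(aws.StringValue(acct.AccountId))
		}
		// A nil token means there are no more items to return.
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}
```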
+type GetIpamDiscoveredResourceCidrsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A check for whether you have the required permissions for the action without
+ // actually making the request and provides an error response. If you have the
+ // required permissions, the error response is DryRunOperation. Otherwise, it
+ // is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // Filters.
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+ // A resource discovery ID.
+ //
+ // IpamResourceDiscoveryId is a required field
+ IpamResourceDiscoveryId *string `type:"string" required:"true"`
+
+ // The maximum number of discovered resource CIDRs to return in one page of
+ // results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `type:"string"`
+
+ // A resource Region.
+ //
+ // ResourceRegion is a required field
+ ResourceRegion *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredResourceCidrsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredResourceCidrsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetIpamDiscoveredResourceCidrsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetIpamDiscoveredResourceCidrsInput"}
+ if s.IpamResourceDiscoveryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryId"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+ if s.ResourceRegion == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceRegion"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetDryRun(v bool) *GetIpamDiscoveredResourceCidrsInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetFilters(v []*Filter) *GetIpamDiscoveredResourceCidrsInput {
+ s.Filters = v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetIpamResourceDiscoveryId(v string) *GetIpamDiscoveredResourceCidrsInput {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetMaxResults(v int64) *GetIpamDiscoveredResourceCidrsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetNextToken(v string) *GetIpamDiscoveredResourceCidrsInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetResourceRegion sets the ResourceRegion field's value.
+func (s *GetIpamDiscoveredResourceCidrsInput) SetResourceRegion(v string) *GetIpamDiscoveredResourceCidrsInput {
+ s.ResourceRegion = &v
+ return s
+}
+
+type GetIpamDiscoveredResourceCidrsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Discovered resource CIDRs.
+ IpamDiscoveredResourceCidrs []*IpamDiscoveredResourceCidr `locationName:"ipamDiscoveredResourceCidrSet" locationNameList:"item" type:"list"`
+
+ // Specify the pagination token from a previous request to retrieve the next
+ // page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredResourceCidrsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetIpamDiscoveredResourceCidrsOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamDiscoveredResourceCidrs sets the IpamDiscoveredResourceCidrs field's value.
+func (s *GetIpamDiscoveredResourceCidrsOutput) SetIpamDiscoveredResourceCidrs(v []*IpamDiscoveredResourceCidr) *GetIpamDiscoveredResourceCidrsOutput {
+ s.IpamDiscoveredResourceCidrs = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetIpamDiscoveredResourceCidrsOutput) SetNextToken(v string) *GetIpamDiscoveredResourceCidrsOutput {
+ s.NextToken = &v
+ return s
+}
+
type GetIpamPoolAllocationsInput struct {
_ struct{} `type:"structure"`
@@ -114668,12 +122425,13 @@ type GetSpotPlacementScoresInput struct {
// If you specify InstanceTypes, you can't specify InstanceRequirementsWithMetadata.
InstanceTypes []*string `locationName:"InstanceType" type:"list"`
- // The maximum number of results to return in a single call. Specify a value
- // between 1 and 1000. The default value is 1000. To retrieve the remaining
- // results, make another call with the returned NextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"10" type:"integer"`
- // The token for the next set of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The Regions used to narrow down the list of Regions to be scored. Enter the
@@ -114797,7 +122555,8 @@ func (s *GetSpotPlacementScoresInput) SetTargetCapacityUnitType(v string) *GetSp
type GetSpotPlacementScoresOutput struct {
_ struct{} `type:"structure"`
- // The token for the next set of results.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The Spot placement score for the top 10 Regions or Availability Zones, scored
@@ -115168,7 +122927,9 @@ type GetTransitGatewayMulticastDomainAssociationsInput struct {
NextToken *string `type:"string"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -115195,6 +122956,9 @@ func (s *GetTransitGatewayMulticastDomainAssociationsInput) Validate() error {
if s.MaxResults != nil && *s.MaxResults < 5 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
}
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -115943,6 +123707,202 @@ func (s *GetTransitGatewayRouteTablePropagationsOutput) SetTransitGatewayRouteTa
return s
}
+type GetVerifiedAccessEndpointPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ //
+ // VerifiedAccessEndpointId is a required field
+ VerifiedAccessEndpointId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessEndpointPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessEndpointPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetVerifiedAccessEndpointPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetVerifiedAccessEndpointPolicyInput"}
+ if s.VerifiedAccessEndpointId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessEndpointId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetVerifiedAccessEndpointPolicyInput) SetDryRun(v bool) *GetVerifiedAccessEndpointPolicyInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessEndpointId sets the VerifiedAccessEndpointId field's value.
+func (s *GetVerifiedAccessEndpointPolicyInput) SetVerifiedAccessEndpointId(v string) *GetVerifiedAccessEndpointPolicyInput {
+ s.VerifiedAccessEndpointId = &v
+ return s
+}
+
+type GetVerifiedAccessEndpointPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The status of the Verified Access policy.
+ PolicyEnabled *bool `locationName:"policyEnabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessEndpointPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessEndpointPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *GetVerifiedAccessEndpointPolicyOutput) SetPolicyDocument(v string) *GetVerifiedAccessEndpointPolicyOutput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *GetVerifiedAccessEndpointPolicyOutput) SetPolicyEnabled(v bool) *GetVerifiedAccessEndpointPolicyOutput {
+ s.PolicyEnabled = &v
+ return s
+}
+
+type GetVerifiedAccessGroupPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ //
+ // VerifiedAccessGroupId is a required field
+ VerifiedAccessGroupId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessGroupPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessGroupPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetVerifiedAccessGroupPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetVerifiedAccessGroupPolicyInput"}
+ if s.VerifiedAccessGroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessGroupId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetVerifiedAccessGroupPolicyInput) SetDryRun(v bool) *GetVerifiedAccessGroupPolicyInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *GetVerifiedAccessGroupPolicyInput) SetVerifiedAccessGroupId(v string) *GetVerifiedAccessGroupPolicyInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+type GetVerifiedAccessGroupPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The status of the Verified Access policy.
+ PolicyEnabled *bool `locationName:"policyEnabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessGroupPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVerifiedAccessGroupPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *GetVerifiedAccessGroupPolicyOutput) SetPolicyDocument(v string) *GetVerifiedAccessGroupPolicyOutput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *GetVerifiedAccessGroupPolicyOutput) SetPolicyEnabled(v bool) *GetVerifiedAccessGroupPolicyOutput {
+ s.PolicyEnabled = &v
+ return s
+}
+
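A minimal usage sketch for the new Verified Access group policy shapes, assuming the generated GetVerifiedAccessGroupPolicy operation accompanies them in this SDK revision. The group ID is a placeholder, and the imports use the upstream aws-sdk-go paths (this repo vendors the SDK under k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	// Placeholder group ID; VerifiedAccessGroupId is required by Validate().
	out, err := svc.GetVerifiedAccessGroupPolicy(&ec2.GetVerifiedAccessGroupPolicyInput{
		VerifiedAccessGroupId: aws.String("vagr-0123456789abcdef0"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// PolicyEnabled reports whether the policy is active; PolicyDocument holds the text.
	if aws.BoolValue(out.PolicyEnabled) {
		fmt.Println(aws.StringValue(out.PolicyDocument))
	}
}
```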
type GetVpnConnectionDeviceSampleConfigurationInput struct {
_ struct{} `type:"structure"`
@@ -116180,6 +124140,154 @@ func (s *GetVpnConnectionDeviceTypesOutput) SetVpnConnectionDeviceTypes(v []*Vpn
return s
}
+type GetVpnTunnelReplacementStatusInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Site-to-Site VPN connection.
+ //
+ // VpnConnectionId is a required field
+ VpnConnectionId *string `type:"string" required:"true"`
+
+ // The external IP address of the VPN tunnel.
+ //
+ // VpnTunnelOutsideIpAddress is a required field
+ VpnTunnelOutsideIpAddress *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVpnTunnelReplacementStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVpnTunnelReplacementStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetVpnTunnelReplacementStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetVpnTunnelReplacementStatusInput"}
+ if s.VpnConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpnConnectionId"))
+ }
+ if s.VpnTunnelOutsideIpAddress == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetVpnTunnelReplacementStatusInput) SetDryRun(v bool) *GetVpnTunnelReplacementStatusInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVpnConnectionId sets the VpnConnectionId field's value.
+func (s *GetVpnTunnelReplacementStatusInput) SetVpnConnectionId(v string) *GetVpnTunnelReplacementStatusInput {
+ s.VpnConnectionId = &v
+ return s
+}
+
+// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value.
+func (s *GetVpnTunnelReplacementStatusInput) SetVpnTunnelOutsideIpAddress(v string) *GetVpnTunnelReplacementStatusInput {
+ s.VpnTunnelOutsideIpAddress = &v
+ return s
+}
+
+type GetVpnTunnelReplacementStatusOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the customer gateway.
+ CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"`
+
+	// Details of any pending tunnel endpoint maintenance.
+ MaintenanceDetails *MaintenanceDetails `locationName:"maintenanceDetails" type:"structure"`
+
+ // The ID of the transit gateway associated with the VPN connection.
+ TransitGatewayId *string `locationName:"transitGatewayId" type:"string"`
+
+ // The ID of the Site-to-Site VPN connection.
+ VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"`
+
+ // The ID of the virtual private gateway.
+ VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"`
+
+ // The external IP address of the VPN tunnel.
+ VpnTunnelOutsideIpAddress *string `locationName:"vpnTunnelOutsideIpAddress" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVpnTunnelReplacementStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetVpnTunnelReplacementStatusOutput) GoString() string {
+ return s.String()
+}
+
+// SetCustomerGatewayId sets the CustomerGatewayId field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetCustomerGatewayId(v string) *GetVpnTunnelReplacementStatusOutput {
+ s.CustomerGatewayId = &v
+ return s
+}
+
+// SetMaintenanceDetails sets the MaintenanceDetails field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetMaintenanceDetails(v *MaintenanceDetails) *GetVpnTunnelReplacementStatusOutput {
+ s.MaintenanceDetails = v
+ return s
+}
+
+// SetTransitGatewayId sets the TransitGatewayId field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetTransitGatewayId(v string) *GetVpnTunnelReplacementStatusOutput {
+ s.TransitGatewayId = &v
+ return s
+}
+
+// SetVpnConnectionId sets the VpnConnectionId field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetVpnConnectionId(v string) *GetVpnTunnelReplacementStatusOutput {
+ s.VpnConnectionId = &v
+ return s
+}
+
+// SetVpnGatewayId sets the VpnGatewayId field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetVpnGatewayId(v string) *GetVpnTunnelReplacementStatusOutput {
+ s.VpnGatewayId = &v
+ return s
+}
+
+// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value.
+func (s *GetVpnTunnelReplacementStatusOutput) SetVpnTunnelOutsideIpAddress(v string) *GetVpnTunnelReplacementStatusOutput {
+ s.VpnTunnelOutsideIpAddress = &v
+ return s
+}
+
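A hedged sketch of calling the new GetVpnTunnelReplacementStatus operation and reading the MaintenanceDetails it returns (the MaintenanceDetails type is added further down in this diff). Both required inputs are placeholders, and the operation method is assumed to be generated alongside these shapes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Placeholder IDs; both fields are required by Validate().
	out, err := svc.GetVpnTunnelReplacementStatus(&ec2.GetVpnTunnelReplacementStatusInput{
		VpnConnectionId:           aws.String("vpn-0123456789abcdef0"),
		VpnTunnelOutsideIpAddress: aws.String("203.0.113.17"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// MaintenanceDetails describes pending tunnel endpoint maintenance, if any.
	if md := out.MaintenanceDetails; md != nil {
		fmt.Printf("pending: %s, auto-applied after: %s\n",
			aws.StringValue(md.PendingMaintenance),
			aws.TimeValue(md.MaintenanceAutoAppliedAfter))
	}
}
```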
// Describes the GPU accelerators for the instance type.
type GpuDeviceInfo struct {
_ struct{} `type:"structure"`
@@ -116568,6 +124676,10 @@ type Host struct {
// The ID of the Dedicated Host.
HostId *string `locationName:"hostId" type:"string"`
+ // Indicates whether host maintenance is enabled or disabled for the Dedicated
+ // Host.
+ HostMaintenance *string `locationName:"hostMaintenance" type:"string" enum:"HostMaintenance"`
+
// The hardware specifications of the Dedicated Host.
HostProperties *HostProperties `locationName:"hostProperties" type:"structure"`
@@ -116669,6 +124781,12 @@ func (s *Host) SetHostId(v string) *Host {
return s
}
+// SetHostMaintenance sets the HostMaintenance field's value.
+func (s *Host) SetHostMaintenance(v string) *Host {
+ s.HostMaintenance = &v
+ return s
+}
+
// SetHostProperties sets the HostProperties field's value.
func (s *Host) SetHostProperties(v *HostProperties) *Host {
s.HostProperties = v
@@ -117406,7 +125524,7 @@ type Image struct {
BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
// The boot mode of the image. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"`
// The date and time the image was created.
@@ -117444,7 +125562,7 @@ type Image struct {
// by default, the instance requires that IMDSv2 is used when requesting instance
// metadata. In addition, HttpPutResponseHopLimit is set to 2. For more information,
// see Configure the AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html#configure-IMDS-new-instances-ami-configuration)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
ImdsSupport *string `locationName:"imdsSupport" type:"string" enum:"ImdsSupportValues"`
// The kernel associated with the image, if any. Only applicable for machine
@@ -117462,7 +125580,7 @@ type Image struct {
// The platform details associated with the billing code of the AMI. For more
// information, see Understand AMI billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
PlatformDetails *string `locationName:"platformDetails" type:"string"`
// Any product codes associated with the AMI.
@@ -117500,7 +125618,7 @@ type Image struct {
// If the image is configured for NitroTPM support, the value is v2.0. For more
// information, see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
TpmSupport *string `locationName:"tpmSupport" type:"string" enum:"TpmSupportValues"`
// The operation of the Amazon EC2 instance and the billing code that is associated
@@ -119701,7 +127819,14 @@ type Instance struct {
// Any block device mapping entries for the instance.
BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
- // The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // The boot mode that was specified by the AMI. If the value is uefi-preferred,
+ // the AMI supports both UEFI and Legacy BIOS. The currentInstanceBootMode parameter
+ // is the boot mode that is used to boot the instance at launch or start.
+ //
+ // The operating system contained in the AMI must be configured to support the
+ // specified boot mode.
+ //
+ // For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
// in the Amazon EC2 User Guide.
BootMode *string `locationName:"bootMode" type:"string" enum:"BootModeValues"`
@@ -119717,6 +127842,11 @@ type Instance struct {
// The CPU options for the instance.
CpuOptions *CpuOptions `locationName:"cpuOptions" type:"structure"`
+ // The boot mode that is used to boot the instance at launch or start. For more
+ // information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon EC2 User Guide.
+ CurrentInstanceBootMode *string `locationName:"currentInstanceBootMode" type:"string" enum:"InstanceBootModeValues"`
+
// Indicates whether the instance is optimized for Amazon EBS I/O. This optimization
// provides dedicated throughput to Amazon EBS and an optimized configuration
// stack to provide optimal I/O performance. This optimization isn't available
@@ -119955,6 +128085,12 @@ func (s *Instance) SetCpuOptions(v *CpuOptions) *Instance {
return s
}
+// SetCurrentInstanceBootMode sets the CurrentInstanceBootMode field's value.
+func (s *Instance) SetCurrentInstanceBootMode(v string) *Instance {
+ s.CurrentInstanceBootMode = &v
+ return s
+}
+
// SetEbsOptimized sets the EbsOptimized field's value.
func (s *Instance) SetEbsOptimized(v bool) *Instance {
s.EbsOptimized = &v
@@ -120494,7 +128630,9 @@ type InstanceCreditSpecificationRequest struct {
CpuCredits *string `type:"string"`
// The ID of the instance.
- InstanceId *string `type:"string"`
+ //
+ // InstanceId is a required field
+ InstanceId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -120515,6 +128653,19 @@ func (s InstanceCreditSpecificationRequest) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstanceCreditSpecificationRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstanceCreditSpecificationRequest"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetCpuCredits sets the CpuCredits field's value.
func (s *InstanceCreditSpecificationRequest) SetCpuCredits(v string) *InstanceCreditSpecificationRequest {
s.CpuCredits = &v
@@ -121269,18 +129420,20 @@ type InstanceMetadataOptionsRequest struct {
// Possible values: Integers from 1 to 64
HttpPutResponseHopLimit *int64 `type:"integer"`
- // The state of token usage for your instance metadata requests.
+ // IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional
+ // (in other words, set the use of IMDSv2 to optional) or required (in other
+ // words, set the use of IMDSv2 to required).
//
- // If the state is optional, you can choose to retrieve instance metadata with
- // or without a session token on your request. If you retrieve the IAM role
- // credentials without a token, the version 1.0 role credentials are returned.
- // If you retrieve the IAM role credentials using a valid session token, the
- // version 2.0 role credentials are returned.
+ // * optional - When IMDSv2 is optional, you can choose to retrieve instance
+ // metadata with or without a session token in your request. If you retrieve
+ // the IAM role credentials without a token, the IMDSv1 role credentials
+ // are returned. If you retrieve the IAM role credentials using a valid session
+ // token, the IMDSv2 role credentials are returned.
//
- // If the state is required, you must send a session token with any instance
- // metadata retrieval requests. In this state, retrieving the IAM role credentials
- // always returns the version 2.0 credentials; the version 1.0 credentials are
- // not available.
+ // * required - When IMDSv2 is required, you must send a session token with
+ // any instance metadata retrieval requests. In this state, retrieving the
+ // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials
+ // are not available.
//
// Default: optional
HttpTokens *string `type:"string" enum:"HttpTokensState"`
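A short sketch of how the HttpTokens setting described above is typically supplied at launch: the InstanceMetadataOptionsRequest is attached to RunInstancesInput.MetadataOptions. The AMI ID is a placeholder, and the ec2.HttpTokensStateRequired and ec2.InstanceTypeT3Micro constants are assumed to be present in this SDK version:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// imdsv2Only returns metadata options that require IMDSv2 (token-backed
// sessions) and allow the token one extra network hop, e.g. for containers
// running on the instance.
func imdsv2Only() *ec2.InstanceMetadataOptionsRequest {
	return &ec2.InstanceMetadataOptionsRequest{
		HttpTokens:              aws.String(ec2.HttpTokensStateRequired),
		HttpPutResponseHopLimit: aws.Int64(2),
	}
}

func main() {
	// Placeholder AMI and instance type; the options ride along on RunInstances.
	input := &ec2.RunInstancesInput{
		ImageId:         aws.String("ami-0123456789abcdef0"),
		InstanceType:    aws.String(ec2.InstanceTypeT3Micro),
		MinCount:        aws.Int64(1),
		MaxCount:        aws.Int64(1),
		MetadataOptions: imdsv2Only(),
	}
	fmt.Println(input)
}
```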
@@ -121364,18 +129517,20 @@ type InstanceMetadataOptionsResponse struct {
// Possible values: Integers from 1 to 64
HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"`
- // The state of token usage for your instance metadata requests.
+ // IMDSv2 uses token-backed sessions. Indicates whether the use of HTTP tokens
+ // is optional (in other words, indicates whether the use of IMDSv2 is optional)
+ // or required (in other words, indicates whether the use of IMDSv2 is required).
//
- // If the state is optional, you can choose to retrieve instance metadata with
- // or without a session token on your request. If you retrieve the IAM role
- // credentials without a token, the version 1.0 role credentials are returned.
- // If you retrieve the IAM role credentials using a valid session token, the
- // version 2.0 role credentials are returned.
+ // * optional - When IMDSv2 is optional, you can choose to retrieve instance
+ // metadata with or without a session token in your request. If you retrieve
+ // the IAM role credentials without a token, the IMDSv1 role credentials
+ // are returned. If you retrieve the IAM role credentials using a valid session
+ // token, the IMDSv2 role credentials are returned.
//
- // If the state is required, you must send a session token with any instance
- // metadata retrieval requests. In this state, retrieving the IAM role credentials
- // always returns the version 2.0 credentials; the version 1.0 credentials are
- // not available.
+ // * required - When IMDSv2 is required, you must send a session token with
+ // any instance metadata retrieval requests. In this state, retrieving the
+ // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials
+ // are not available.
//
// Default: optional
HttpTokens *string `locationName:"httpTokens" type:"string" enum:"HttpTokensState"`
@@ -123113,7 +131268,9 @@ type InstanceSpecification struct {
ExcludeDataVolumeIds []*string `locationName:"ExcludeDataVolumeId" locationNameList:"VolumeId" type:"list"`
// The instance to specify which volumes should be snapshotted.
- InstanceId *string `type:"string"`
+ //
+ // InstanceId is a required field
+ InstanceId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -123134,6 +131291,19 @@ func (s InstanceSpecification) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstanceSpecification) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstanceSpecification"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetExcludeBootVolume sets the ExcludeBootVolume field's value.
func (s *InstanceSpecification) SetExcludeBootVolume(v bool) *InstanceSpecification {
s.ExcludeBootVolume = &v
@@ -124164,9 +132334,10 @@ func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachmen
type IpPermission struct {
_ struct{} `type:"structure"`
- // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6
- // type number. A value of -1 indicates all ICMP/ICMPv6 types. If you specify
- // all ICMP/ICMPv6 types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the start of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the type number. A value of -1 indicates
+ // all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
FromPort *int64 `locationName:"fromPort" type:"integer"`
// The IP protocol name (tcp, udp, icmp, icmpv6) or number (see Protocol Numbers
@@ -124189,9 +132360,10 @@ type IpPermission struct {
// [VPC only] The prefix list IDs.
PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"`
- // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
- // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6
- // types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the end of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the code. A value of -1 indicates all
+ // ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
ToPort *int64 `locationName:"toPort" type:"integer"`
// The security group and Amazon Web Services account ID pairs.
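The reworded FromPort/ToPort documentation above is easiest to see with two concrete rules, sketched below: for TCP the fields bound a port range, while for ICMP they carry the type and code, with -1 meaning all types or codes. The CIDRs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// TCP: FromPort/ToPort bound the port range (here, HTTPS only).
	httpsRule := &ec2.IpPermission{
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(443),
		ToPort:     aws.Int64(443),
		IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
	}

	// ICMP: FromPort is the type and ToPort is the code; -1/-1 means all
	// ICMP types and all codes, as the field documentation describes.
	icmpRule := &ec2.IpPermission{
		IpProtocol: aws.String("icmp"),
		FromPort:   aws.Int64(-1),
		ToPort:     aws.Int64(-1),
		IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/16")}},
	}

	fmt.Println(httpsRule, icmpRule)
}
```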
@@ -124313,10 +132485,16 @@ func (s *IpRange) SetDescription(v string) *IpRange {
type Ipam struct {
_ struct{} `type:"structure"`
+ // The IPAM's default resource discovery association ID.
+ DefaultResourceDiscoveryAssociationId *string `locationName:"defaultResourceDiscoveryAssociationId" type:"string"`
+
+ // The IPAM's default resource discovery ID.
+ DefaultResourceDiscoveryId *string `locationName:"defaultResourceDiscoveryId" type:"string"`
+
// The description for the IPAM.
Description *string `locationName:"description" type:"string"`
- // The ARN of the IPAM.
+ // The Amazon Resource Name (ARN) of the IPAM.
IpamArn *string `locationName:"ipamArn" min:"1" type:"string"`
// The ID of the IPAM.
@@ -124343,6 +132521,9 @@ type Ipam struct {
// The ID of the IPAM's default public scope.
PublicDefaultScopeId *string `locationName:"publicDefaultScopeId" type:"string"`
+ // The IPAM's resource discovery association count.
+ ResourceDiscoveryAssociationCount *int64 `locationName:"resourceDiscoveryAssociationCount" type:"integer"`
+
// The number of scopes in the IPAM. The scope quota is 5. For more information
// on quotas, see Quotas in IPAM (https://docs.aws.amazon.com/vpc/latest/ipam/quotas-ipam.html)
// in the Amazon VPC IPAM User Guide.
@@ -124376,6 +132557,18 @@ func (s Ipam) GoString() string {
return s.String()
}
+// SetDefaultResourceDiscoveryAssociationId sets the DefaultResourceDiscoveryAssociationId field's value.
+func (s *Ipam) SetDefaultResourceDiscoveryAssociationId(v string) *Ipam {
+ s.DefaultResourceDiscoveryAssociationId = &v
+ return s
+}
+
+// SetDefaultResourceDiscoveryId sets the DefaultResourceDiscoveryId field's value.
+func (s *Ipam) SetDefaultResourceDiscoveryId(v string) *Ipam {
+ s.DefaultResourceDiscoveryId = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *Ipam) SetDescription(v string) *Ipam {
s.Description = &v
@@ -124424,6 +132617,12 @@ func (s *Ipam) SetPublicDefaultScopeId(v string) *Ipam {
return s
}
+// SetResourceDiscoveryAssociationCount sets the ResourceDiscoveryAssociationCount field's value.
+func (s *Ipam) SetResourceDiscoveryAssociationCount(v int64) *Ipam {
+ s.ResourceDiscoveryAssociationCount = &v
+ return s
+}
+
// SetScopeCount sets the ScopeCount field's value.
func (s *Ipam) SetScopeCount(v int64) *Ipam {
s.ScopeCount = &v
@@ -124617,6 +132816,265 @@ func (s *IpamCidrAuthorizationContext) SetSignature(v string) *IpamCidrAuthoriza
return s
}
+// An IPAM discovered account. A discovered account is an Amazon Web Services
+// account that is monitored under a resource discovery. If you have integrated
+// IPAM with Amazon Web Services Organizations, all accounts in the organization
+// are discovered accounts.
+type IpamDiscoveredAccount struct {
+ _ struct{} `type:"structure"`
+
+ // The account ID.
+ AccountId *string `locationName:"accountId" type:"string"`
+
+ // The Amazon Web Services Region that the account information is returned from.
+	// An account can be discovered in multiple Regions and will have a separate
+	// discovered account for each Region.
+ DiscoveryRegion *string `locationName:"discoveryRegion" type:"string"`
+
+ // The resource discovery failure reason.
+ FailureReason *IpamDiscoveryFailureReason `locationName:"failureReason" type:"structure"`
+
+ // The last attempted resource discovery time.
+ LastAttemptedDiscoveryTime *time.Time `locationName:"lastAttemptedDiscoveryTime" type:"timestamp"`
+
+ // The last successful resource discovery time.
+ LastSuccessfulDiscoveryTime *time.Time `locationName:"lastSuccessfulDiscoveryTime" type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveredAccount) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveredAccount) GoString() string {
+ return s.String()
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *IpamDiscoveredAccount) SetAccountId(v string) *IpamDiscoveredAccount {
+ s.AccountId = &v
+ return s
+}
+
+// SetDiscoveryRegion sets the DiscoveryRegion field's value.
+func (s *IpamDiscoveredAccount) SetDiscoveryRegion(v string) *IpamDiscoveredAccount {
+ s.DiscoveryRegion = &v
+ return s
+}
+
+// SetFailureReason sets the FailureReason field's value.
+func (s *IpamDiscoveredAccount) SetFailureReason(v *IpamDiscoveryFailureReason) *IpamDiscoveredAccount {
+ s.FailureReason = v
+ return s
+}
+
+// SetLastAttemptedDiscoveryTime sets the LastAttemptedDiscoveryTime field's value.
+func (s *IpamDiscoveredAccount) SetLastAttemptedDiscoveryTime(v time.Time) *IpamDiscoveredAccount {
+ s.LastAttemptedDiscoveryTime = &v
+ return s
+}
+
+// SetLastSuccessfulDiscoveryTime sets the LastSuccessfulDiscoveryTime field's value.
+func (s *IpamDiscoveredAccount) SetLastSuccessfulDiscoveryTime(v time.Time) *IpamDiscoveredAccount {
+ s.LastSuccessfulDiscoveryTime = &v
+ return s
+}
+
+// An IPAM discovered resource CIDR. A discovered resource is a resource CIDR
+// monitored under a resource discovery. The following resources can be discovered:
+// VPCs, Public IPv4 pools, VPC subnets, and Elastic IP addresses. The discovered
+// resource CIDR is the IP address range in CIDR notation that is associated
+// with the resource.
+type IpamDiscoveredResourceCidr struct {
+ _ struct{} `type:"structure"`
+
+ // The percentage of IP address space in use. To convert the decimal to a percentage,
+ // multiply the decimal by 100. Note the following:
+ //
+ // * For resources that are VPCs, this is the percentage of IP address space
+ // in the VPC that's taken up by subnet CIDRs.
+ //
+ // * For resources that are subnets, if the subnet has an IPv4 CIDR provisioned
+ // to it, this is the percentage of IPv4 address space in the subnet that's
+ // in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage
+ // of IPv6 address space in use is not represented. The percentage of IPv6
+ // address space in use cannot currently be calculated.
+ //
+ // * For resources that are public IPv4 pools, this is the percentage of
+ // IP address space in the pool that's been allocated to Elastic IP addresses
+ // (EIPs).
+ IpUsage *float64 `locationName:"ipUsage" type:"double"`
+
+ // The resource discovery ID.
+ IpamResourceDiscoveryId *string `locationName:"ipamResourceDiscoveryId" type:"string"`
+
+ // The resource CIDR.
+ ResourceCidr *string `locationName:"resourceCidr" type:"string"`
+
+ // The resource ID.
+ ResourceId *string `locationName:"resourceId" type:"string"`
+
+ // The resource owner ID.
+ ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`
+
+ // The resource Region.
+ ResourceRegion *string `locationName:"resourceRegion" type:"string"`
+
+ // The resource tags.
+ ResourceTags []*IpamResourceTag `locationName:"resourceTagSet" locationNameList:"item" type:"list"`
+
+ // The resource type.
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"IpamResourceType"`
+
+ // The last successful resource discovery time.
+ SampleTime *time.Time `locationName:"sampleTime" type:"timestamp"`
+
+ // The VPC ID.
+ VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveredResourceCidr) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveredResourceCidr) GoString() string {
+ return s.String()
+}
+
+// SetIpUsage sets the IpUsage field's value.
+func (s *IpamDiscoveredResourceCidr) SetIpUsage(v float64) *IpamDiscoveredResourceCidr {
+ s.IpUsage = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *IpamDiscoveredResourceCidr) SetIpamResourceDiscoveryId(v string) *IpamDiscoveredResourceCidr {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetResourceCidr sets the ResourceCidr field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceCidr(v string) *IpamDiscoveredResourceCidr {
+ s.ResourceCidr = &v
+ return s
+}
+
+// SetResourceId sets the ResourceId field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceId(v string) *IpamDiscoveredResourceCidr {
+ s.ResourceId = &v
+ return s
+}
+
+// SetResourceOwnerId sets the ResourceOwnerId field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceOwnerId(v string) *IpamDiscoveredResourceCidr {
+ s.ResourceOwnerId = &v
+ return s
+}
+
+// SetResourceRegion sets the ResourceRegion field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceRegion(v string) *IpamDiscoveredResourceCidr {
+ s.ResourceRegion = &v
+ return s
+}
+
+// SetResourceTags sets the ResourceTags field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceTags(v []*IpamResourceTag) *IpamDiscoveredResourceCidr {
+ s.ResourceTags = v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *IpamDiscoveredResourceCidr) SetResourceType(v string) *IpamDiscoveredResourceCidr {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSampleTime sets the SampleTime field's value.
+func (s *IpamDiscoveredResourceCidr) SetSampleTime(v time.Time) *IpamDiscoveredResourceCidr {
+ s.SampleTime = &v
+ return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *IpamDiscoveredResourceCidr) SetVpcId(v string) *IpamDiscoveredResourceCidr {
+ s.VpcId = &v
+ return s
+}
+
+// The discovery failure reason.
+type IpamDiscoveryFailureReason struct {
+ _ struct{} `type:"structure"`
+
+ // The discovery failure code.
+ //
+ // * assume-role-failure - IPAM could not assume the Amazon Web Services
+ // IAM service-linked role. This could be because of any of the following:
+	//    the SLR has not been created yet and IPAM is still creating it; you have
+	//    opted out of the IPAM home Region; or the account you are using as your
+	//    IPAM account has been suspended.
+ //
+ // * throttling-failure - IPAM account is already using the allotted transactions
+ // per second and IPAM is receiving a throttling error when assuming the
+ // Amazon Web Services IAM SLR.
+ //
+ // * unauthorized-failure - Amazon Web Services account making the request
+ // is not authorized. For more information, see AuthFailure (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html)
+ // in the Amazon Elastic Compute Cloud API Reference.
+ Code *string `locationName:"code" type:"string" enum:"IpamDiscoveryFailureCode"`
+
+ // The discovery failure message.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveryFailureReason) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamDiscoveryFailureReason) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *IpamDiscoveryFailureReason) SetCode(v string) *IpamDiscoveryFailureReason {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *IpamDiscoveryFailureReason) SetMessage(v string) *IpamDiscoveryFailureReason {
+ s.Message = &v
+ return s
+}
+
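A hedged sketch of reading discovered resource CIDRs and converting IpUsage into a percentage, as the field documentation suggests. It assumes the GetIpamDiscoveredResourceCidrs operation and its IpamDiscoveredResourceCidrs output field are generated alongside these shapes; the discovery ID and Region are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Placeholder resource discovery ID and Region.
	out, err := svc.GetIpamDiscoveredResourceCidrs(&ec2.GetIpamDiscoveredResourceCidrsInput{
		IpamResourceDiscoveryId: aws.String("ipam-res-disco-0123456789abcdef0"),
		ResourceRegion:          aws.String("us-east-1"),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range out.IpamDiscoveredResourceCidrs {
		// IpUsage is a fraction; multiply by 100 to report a percentage.
		fmt.Printf("%s %s: %.1f%% in use\n",
			aws.StringValue(c.ResourceType),
			aws.StringValue(c.ResourceCidr),
			aws.Float64Value(c.IpUsage)*100)
	}
}
```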
// The operating Regions for an IPAM. Operating Regions are Amazon Web Services
// Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only discovers
// and monitors resources in the Amazon Web Services Regions you select as operating
@@ -124713,7 +133171,7 @@ type IpamPool struct {
// The ARN of the IPAM.
IpamArn *string `locationName:"ipamArn" min:"1" type:"string"`
- // The ARN of the IPAM pool.
+ // The Amazon Resource Name (ARN) of the IPAM pool.
IpamPoolArn *string `locationName:"ipamPoolArn" min:"1" type:"string"`
// The ID of the IPAM pool.
@@ -124751,6 +133209,15 @@ type IpamPool struct {
// in the Amazon VPC IPAM User Guide.
PoolDepth *int64 `locationName:"poolDepth" type:"integer"`
+ // The IP address source for pools in the public scope. Only used for provisioning
+ // IP address CIDRs to pools in the public scope. Default is BYOIP. For more
+ // information, see Create IPv6 pools (https://docs.aws.amazon.com/vpc/latest/ipam/intro-create-ipv6-pools.html)
+ // in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided
+ // IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the
+ // default limit, see Quotas for your IPAM (https://docs.aws.amazon.com/vpc/latest/ipam/quotas-ipam.html)
+ // in the Amazon VPC IPAM User Guide.
+ PublicIpSource *string `locationName:"publicIpSource" type:"string" enum:"IpamPoolPublicIpSource"`
+
// Determines if a pool is publicly advertisable. This option is not available
// for pools with AddressFamily set to ipv4.
PubliclyAdvertisable *bool `locationName:"publiclyAdvertisable" type:"boolean"`
@@ -124892,6 +133359,12 @@ func (s *IpamPool) SetPoolDepth(v int64) *IpamPool {
return s
}
+// SetPublicIpSource sets the PublicIpSource field's value.
+func (s *IpamPool) SetPublicIpSource(v string) *IpamPool {
+ s.PublicIpSource = &v
+ return s
+}
+
// SetPubliclyAdvertisable sets the PubliclyAdvertisable field's value.
func (s *IpamPool) SetPubliclyAdvertisable(v bool) *IpamPool {
s.PubliclyAdvertisable = &v
@@ -124923,7 +133396,7 @@ func (s *IpamPool) SetTags(v []*Tag) *IpamPool {
}
// In IPAM, an allocation is a CIDR assignment from an IPAM pool to another
-// resource or IPAM pool.
+// IPAM pool or to a resource.
type IpamPoolAllocation struct {
_ struct{} `type:"structure"`
@@ -125025,6 +133498,15 @@ type IpamPoolCidr struct {
// Details related to why an IPAM pool CIDR failed to be provisioned.
FailureReason *IpamPoolCidrFailureReason `locationName:"failureReason" type:"structure"`
+ // The IPAM pool CIDR ID.
+ IpamPoolCidrId *string `locationName:"ipamPoolCidrId" type:"string"`
+
+ // The netmask length of the CIDR you'd like to provision to a pool. Can be
+ // used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for
+ // provisioning CIDRs to pools with source pools. Cannot be used to provision
+ // BYOIP CIDRs to top-level pools. "NetmaskLength" or "Cidr" is required.
+ NetmaskLength *int64 `locationName:"netmaskLength" type:"integer"`
+
// The state of the CIDR.
State *string `locationName:"state" type:"string" enum:"IpamPoolCidrState"`
}
@@ -125059,6 +133541,18 @@ func (s *IpamPoolCidr) SetFailureReason(v *IpamPoolCidrFailureReason) *IpamPoolC
return s
}
+// SetIpamPoolCidrId sets the IpamPoolCidrId field's value.
+func (s *IpamPoolCidr) SetIpamPoolCidrId(v string) *IpamPoolCidr {
+ s.IpamPoolCidrId = &v
+ return s
+}
+
+// SetNetmaskLength sets the NetmaskLength field's value.
+func (s *IpamPoolCidr) SetNetmaskLength(v int64) *IpamPoolCidr {
+ s.NetmaskLength = &v
+ return s
+}
+
// SetState sets the State field's value.
func (s *IpamPoolCidr) SetState(v string) *IpamPoolCidr {
s.State = &v
@@ -125118,8 +133612,8 @@ type IpamResourceCidr struct {
// The percentage of IP address space in use. To convert the decimal to a percentage,
// multiply the decimal by 100. Note the following:
//
- // * For a resources that are VPCs, this is the percentage of IP address
- // space in the VPC that's taken up by subnet CIDRs.
+ // * For resources that are VPCs, this is the percentage of IP address space
+ // in the VPC that's taken up by subnet CIDRs.
//
// * For resources that are subnets, if the subnet has an IPv4 CIDR provisioned
// to it, this is the percentage of IPv4 address space in the subnet that's
@@ -125285,6 +133779,301 @@ func (s *IpamResourceCidr) SetVpcId(v string) *IpamResourceCidr {
return s
}
+// A resource discovery is an IPAM component that enables IPAM to manage and
+// monitor resources that belong to the owning account.
+type IpamResourceDiscovery struct {
+ _ struct{} `type:"structure"`
+
+ // The resource discovery description.
+ Description *string `locationName:"description" type:"string"`
+
+ // The resource discovery Amazon Resource Name (ARN).
+ IpamResourceDiscoveryArn *string `locationName:"ipamResourceDiscoveryArn" type:"string"`
+
+ // The resource discovery ID.
+ IpamResourceDiscoveryId *string `locationName:"ipamResourceDiscoveryId" type:"string"`
+
+ // The resource discovery Region.
+ IpamResourceDiscoveryRegion *string `locationName:"ipamResourceDiscoveryRegion" type:"string"`
+
+ // Defines if the resource discovery is the default. The default resource discovery
+ // is the resource discovery automatically created when you create an IPAM.
+ IsDefault *bool `locationName:"isDefault" type:"boolean"`
+
+ // The operating Regions for the resource discovery. Operating Regions are Amazon
+ // Web Services Regions where the IPAM is allowed to manage IP address CIDRs.
+ // IPAM only discovers and monitors resources in the Amazon Web Services Regions
+ // you select as operating Regions.
+ OperatingRegions []*IpamOperatingRegion `locationName:"operatingRegionSet" locationNameList:"item" type:"list"`
+
+ // The ID of the owner.
+ OwnerId *string `locationName:"ownerId" type:"string"`
+
+ // The lifecycle state of the resource discovery.
+ //
+ // * create-in-progress - Resource discovery is being created.
+ //
+ // * create-complete - Resource discovery creation is complete.
+ //
+ // * create-failed - Resource discovery creation has failed.
+ //
+ // * modify-in-progress - Resource discovery is being modified.
+ //
+ // * modify-complete - Resource discovery modification is complete.
+ //
+ // * modify-failed - Resource discovery modification has failed.
+ //
+ // * delete-in-progress - Resource discovery is being deleted.
+ //
+ // * delete-complete - Resource discovery deletion is complete.
+ //
+ // * delete-failed - Resource discovery deletion has failed.
+ //
+ // * isolate-in-progress - Amazon Web Services account that created the resource
+ // discovery has been removed and the resource discovery is being isolated.
+ //
+ // * isolate-complete - Resource discovery isolation is complete.
+ //
+ // * restore-in-progress - Amazon Web Services account that created the resource
+ // discovery and was isolated has been restored.
+ State *string `locationName:"state" type:"string" enum:"IpamResourceDiscoveryState"`
+
+ // A tag is a label that you assign to an Amazon Web Services resource. Each
+ // tag consists of a key and an optional value. You can use tags to search and
+ // filter your resources or track your Amazon Web Services costs.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamResourceDiscovery) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamResourceDiscovery) GoString() string {
+ return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *IpamResourceDiscovery) SetDescription(v string) *IpamResourceDiscovery {
+ s.Description = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryArn sets the IpamResourceDiscoveryArn field's value.
+func (s *IpamResourceDiscovery) SetIpamResourceDiscoveryArn(v string) *IpamResourceDiscovery {
+ s.IpamResourceDiscoveryArn = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *IpamResourceDiscovery) SetIpamResourceDiscoveryId(v string) *IpamResourceDiscovery {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryRegion sets the IpamResourceDiscoveryRegion field's value.
+func (s *IpamResourceDiscovery) SetIpamResourceDiscoveryRegion(v string) *IpamResourceDiscovery {
+ s.IpamResourceDiscoveryRegion = &v
+ return s
+}
+
+// SetIsDefault sets the IsDefault field's value.
+func (s *IpamResourceDiscovery) SetIsDefault(v bool) *IpamResourceDiscovery {
+ s.IsDefault = &v
+ return s
+}
+
+// SetOperatingRegions sets the OperatingRegions field's value.
+func (s *IpamResourceDiscovery) SetOperatingRegions(v []*IpamOperatingRegion) *IpamResourceDiscovery {
+ s.OperatingRegions = v
+ return s
+}
+
+// SetOwnerId sets the OwnerId field's value.
+func (s *IpamResourceDiscovery) SetOwnerId(v string) *IpamResourceDiscovery {
+ s.OwnerId = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *IpamResourceDiscovery) SetState(v string) *IpamResourceDiscovery {
+ s.State = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *IpamResourceDiscovery) SetTags(v []*Tag) *IpamResourceDiscovery {
+ s.Tags = v
+ return s
+}
+
+// An IPAM resource discovery association. An associated resource discovery
+// is a resource discovery that has been associated with an IPAM. IPAM aggregates
+// the resource CIDRs discovered by the associated resource discovery.
+type IpamResourceDiscoveryAssociation struct {
+ _ struct{} `type:"structure"`
+
+ // The IPAM ARN.
+ IpamArn *string `locationName:"ipamArn" min:"1" type:"string"`
+
+ // The IPAM ID.
+ IpamId *string `locationName:"ipamId" type:"string"`
+
+ // The IPAM home Region.
+ IpamRegion *string `locationName:"ipamRegion" type:"string"`
+
+ // The resource discovery association Amazon Resource Name (ARN).
+ IpamResourceDiscoveryAssociationArn *string `locationName:"ipamResourceDiscoveryAssociationArn" type:"string"`
+
+ // The resource discovery association ID.
+ IpamResourceDiscoveryAssociationId *string `locationName:"ipamResourceDiscoveryAssociationId" type:"string"`
+
+ // The resource discovery ID.
+ IpamResourceDiscoveryId *string `locationName:"ipamResourceDiscoveryId" type:"string"`
+
+ // Defines if the resource discovery is the default. When you create an IPAM,
+ // a default resource discovery is created for your IPAM and it's associated
+ // with your IPAM.
+ IsDefault *bool `locationName:"isDefault" type:"boolean"`
+
+ // The Amazon Web Services account ID of the resource discovery owner.
+ OwnerId *string `locationName:"ownerId" type:"string"`
+
+ // The resource discovery status.
+ //
+ // * active - Connection or permissions required to read the results of the
+ // resource discovery are intact.
+ //
+ // * not-found - Connection or permissions required to read the results of
+ // the resource discovery are broken. This may happen if the owner of the
+ // resource discovery stopped sharing it or deleted the resource discovery.
+ // Verify the resource discovery still exists and the Amazon Web Services
+ // RAM resource share is still intact.
+ ResourceDiscoveryStatus *string `locationName:"resourceDiscoveryStatus" type:"string" enum:"IpamAssociatedResourceDiscoveryStatus"`
+
+ // The lifecycle state of the association when you associate or disassociate
+ // a resource discovery.
+ //
+ // * associate-in-progress - Resource discovery is being associated.
+ //
+ // * associate-complete - Resource discovery association is complete.
+ //
+ // * associate-failed - Resource discovery association has failed.
+ //
+ // * disassociate-in-progress - Resource discovery is being disassociated.
+ //
+ // * disassociate-complete - Resource discovery disassociation is complete.
+ //
+ // * disassociate-failed - Resource discovery disassociation has failed.
+ //
+	//    * isolate-in-progress - Amazon Web Services account that created the resource
+	//    discovery association has been removed and the resource discovery association
+	//    is being isolated.
+	//
+	//    * isolate-complete - Resource discovery isolation is complete.
+ //
+ // * restore-in-progress - Resource discovery is being restored.
+ State *string `locationName:"state" type:"string" enum:"IpamResourceDiscoveryAssociationState"`
+
+ // A tag is a label that you assign to an Amazon Web Services resource. Each
+ // tag consists of a key and an optional value. You can use tags to search and
+ // filter your resources or track your Amazon Web Services costs.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamResourceDiscoveryAssociation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IpamResourceDiscoveryAssociation) GoString() string {
+ return s.String()
+}
+
+// SetIpamArn sets the IpamArn field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamArn(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamArn = &v
+ return s
+}
+
+// SetIpamId sets the IpamId field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamId(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamId = &v
+ return s
+}
+
+// SetIpamRegion sets the IpamRegion field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamRegion(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamRegion = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryAssociationArn sets the IpamResourceDiscoveryAssociationArn field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamResourceDiscoveryAssociationArn(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamResourceDiscoveryAssociationArn = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryAssociationId sets the IpamResourceDiscoveryAssociationId field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamResourceDiscoveryAssociationId(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamResourceDiscoveryAssociationId = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIpamResourceDiscoveryId(v string) *IpamResourceDiscoveryAssociation {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetIsDefault sets the IsDefault field's value.
+func (s *IpamResourceDiscoveryAssociation) SetIsDefault(v bool) *IpamResourceDiscoveryAssociation {
+ s.IsDefault = &v
+ return s
+}
+
+// SetOwnerId sets the OwnerId field's value.
+func (s *IpamResourceDiscoveryAssociation) SetOwnerId(v string) *IpamResourceDiscoveryAssociation {
+ s.OwnerId = &v
+ return s
+}
+
+// SetResourceDiscoveryStatus sets the ResourceDiscoveryStatus field's value.
+func (s *IpamResourceDiscoveryAssociation) SetResourceDiscoveryStatus(v string) *IpamResourceDiscoveryAssociation {
+ s.ResourceDiscoveryStatus = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *IpamResourceDiscoveryAssociation) SetState(v string) *IpamResourceDiscoveryAssociation {
+ s.State = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *IpamResourceDiscoveryAssociation) SetTags(v []*Tag) *IpamResourceDiscoveryAssociation {
+ s.Tags = v
+ return s
+}
+
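A hedged sketch of listing resource discovery associations and inspecting the two status fields documented above (ResourceDiscoveryStatus for read access, State for the association lifecycle). It assumes a DescribeIpamResourceDiscoveryAssociations operation is generated alongside these shapes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DescribeIpamResourceDiscoveryAssociations(
		&ec2.DescribeIpamResourceDiscoveryAssociationsInput{})
	if err != nil {
		log.Fatal(err)
	}

	for _, a := range out.IpamResourceDiscoveryAssociations {
		// ResourceDiscoveryStatus reports whether IPAM can still read the
		// discovery's results; State tracks the association lifecycle.
		fmt.Printf("%s default=%t status=%s state=%s\n",
			aws.StringValue(a.IpamResourceDiscoveryId),
			aws.BoolValue(a.IsDefault),
			aws.StringValue(a.ResourceDiscoveryStatus),
			aws.StringValue(a.State))
	}
}
```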
// The key/value combination of a tag assigned to the resource. Use the tag
// key in the filter name and the tag value as the filter value. For example,
// to find all resources that have a tag with the key Owner and the value TeamA,
@@ -125351,7 +134140,7 @@ type IpamScope struct {
// The Amazon Web Services Region of the IPAM scope.
IpamRegion *string `locationName:"ipamRegion" type:"string"`
- // The ARN of the scope.
+ // The Amazon Resource Name (ARN) of the scope.
IpamScopeArn *string `locationName:"ipamScopeArn" min:"1" type:"string"`
// The ID of the scope.
@@ -126143,8 +134932,13 @@ type LaunchSpecification struct {
// The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
- // The Base64-encoded user data for the instance.
- UserData *string `locationName:"userData" type:"string"`
+ // The base64-encoded user data that instances use when starting up. User data
+ // is limited to 16 KB.
+ //
+ // UserData is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by LaunchSpecification's
+ // String and GoString methods.
+ UserData *string `locationName:"userData" type:"string" sensitive:"true"`
}
// String returns the string representation.
@@ -127471,19 +136265,20 @@ type LaunchTemplateInstanceMetadataOptions struct {
// Possible values: Integers from 1 to 64
HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"`
- // The state of token usage for your instance metadata requests. If the parameter
- // is not specified in the request, the default state is optional.
+ // Indicates whether IMDSv2 is optional or required.
//
- // If the state is optional, you can choose to retrieve instance metadata with
- // or without a signed token header on your request. If you retrieve the IAM
- // role credentials without a token, the version 1.0 role credentials are returned.
- // If you retrieve the IAM role credentials using a valid signed token, the
- // version 2.0 role credentials are returned.
+ // optional - When IMDSv2 is optional, you can choose to retrieve instance metadata
+ // with or without a session token in your request. If you retrieve the IAM
+ // role credentials without a token, the IMDSv1 role credentials are returned.
+ // If you retrieve the IAM role credentials using a valid session token, the
+ // IMDSv2 role credentials are returned.
//
- // If the state is required, you must send a signed token header with any instance
- // metadata retrieval requests. In this state, retrieving the IAM role credentials
- // always returns the version 2.0 credentials; the version 1.0 credentials are
- // not available.
+ // required - When IMDSv2 is required, you must send a session token with any
+ // instance metadata retrieval requests. In this state, retrieving the IAM role
+ // credentials always returns IMDSv2 credentials; IMDSv1 credentials are not
+ // available.
+ //
+ // Default: optional
HttpTokens *string `locationName:"httpTokens" type:"string" enum:"LaunchTemplateHttpTokensState"`
// Set to enabled to allow access to instance tags from the instance metadata.
@@ -127583,19 +136378,22 @@ type LaunchTemplateInstanceMetadataOptionsRequest struct {
// Possible values: Integers from 1 to 64
HttpPutResponseHopLimit *int64 `type:"integer"`
- // The state of token usage for your instance metadata requests. If the parameter
- // is not specified in the request, the default state is optional.
+ // IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional
+ // (in other words, set the use of IMDSv2 to optional) or required (in other
+ // words, set the use of IMDSv2 to required).
//
- // If the state is optional, you can choose to retrieve instance metadata with
- // or without a signed token header on your request. If you retrieve the IAM
- // role credentials without a token, the version 1.0 role credentials are returned.
- // If you retrieve the IAM role credentials using a valid signed token, the
- // version 2.0 role credentials are returned.
+ // * optional - When IMDSv2 is optional, you can choose to retrieve instance
+ // metadata with or without a session token in your request. If you retrieve
+ // the IAM role credentials without a token, the IMDSv1 role credentials
+ // are returned. If you retrieve the IAM role credentials using a valid session
+ // token, the IMDSv2 role credentials are returned.
//
- // If the state is required, you must send a signed token header with any instance
- // metadata retrieval requests. In this state, retrieving the IAM role credentials
- // always returns the version 2.0 credentials; the version 1.0 credentials are
- // not available.
+ // * required - When IMDSv2 is required, you must send a session token with
+ // any instance metadata retrieval requests. In this state, retrieving the
+ // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials
+ // are not available.
+ //
+ // Default: optional
HttpTokens *string `type:"string" enum:"LaunchTemplateHttpTokensState"`
// Set to enabled to allow access to instance tags from the instance metadata.
@@ -129124,14 +137922,13 @@ type ListImagesInRecycleBinInput struct {
// that are in the Recycle Bin. You can specify up to 20 IDs in a single request.
ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
- //
- // If you do not specify a value for MaxResults, the request returns 1,000 items
- // per page by default. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"1" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
}
@@ -129196,8 +137993,8 @@ type ListImagesInRecycleBinOutput struct {
// Information about the AMIs.
Images []*ImageRecycleBinInfo `locationName:"imageSet" locationNameList:"item" type:"list"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
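The reworded pagination text above follows the standard MaxResults/NextToken loop. A minimal sketch for ListImagesInRecycleBin is below; ListSnapshotsInRecycleBin, whose input is updated in the next hunk, paginates the same way. Import paths are the upstream aws-sdk-go ones:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.ListImagesInRecycleBinInput{MaxResults: aws.Int64(100)}
	for {
		page, err := svc.ListImagesInRecycleBin(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, img := range page.Images {
			fmt.Println(aws.StringValue(img.ImageId))
		}
		// A nil NextToken means there are no more items to return.
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}
```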
@@ -129240,11 +138037,13 @@ type ListSnapshotsInRecycleBinInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The maximum number of results to return with a single call. To retrieve the
- // remaining results, make another call with the returned nextToken value.
+ // The maximum number of items to return for this request. To get the next page
+ // of items, make another request with the token returned in the output. For
+ // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination).
MaxResults *int64 `min:"5" type:"integer"`
- // The token for the next page of results.
+ // The token returned from a previous paginated request. Pagination continues
+ // from the end of the items returned by the previous request.
NextToken *string `type:"string"`
// The IDs of the snapshots to list. Omit this parameter to list all of the
@@ -129310,8 +138109,8 @@ func (s *ListSnapshotsInRecycleBinInput) SetSnapshotIds(v []*string) *ListSnapsh
type ListSnapshotsInRecycleBinOutput struct {
_ struct{} `type:"structure"`
- // The token to use to retrieve the next page of results. This value is null
- // when there are no more results to return.
+ // The token to include in another request to get the next page of items. This
+ // value is null when there are no more items to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the snapshots.
@@ -129611,6 +138410,9 @@ type LocalGatewayRoute struct {
// The CIDR block used for destination matches.
DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`
+ // The ID of the prefix list.
+ DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"`
+
// The Amazon Resource Name (ARN) of the local gateway route table.
LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"`
@@ -129666,6 +138468,12 @@ func (s *LocalGatewayRoute) SetDestinationCidrBlock(v string) *LocalGatewayRoute
return s
}
+// SetDestinationPrefixListId sets the DestinationPrefixListId field's value.
+func (s *LocalGatewayRoute) SetDestinationPrefixListId(v string) *LocalGatewayRoute {
+ s.DestinationPrefixListId = &v
+ return s
+}
+
// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value.
func (s *LocalGatewayRoute) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRoute {
s.LocalGatewayRouteTableArn = &v
@@ -130188,6 +138996,56 @@ func (s *LocalGatewayVirtualInterfaceGroup) SetTags(v []*Tag) *LocalGatewayVirtu
return s
}
+// Details for Site-to-Site VPN tunnel endpoint maintenance events.
+type MaintenanceDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Timestamp of last applied maintenance.
+ LastMaintenanceApplied *time.Time `locationName:"lastMaintenanceApplied" type:"timestamp"`
+
+ // The timestamp after which Amazon Web Services will automatically apply maintenance.
+ MaintenanceAutoAppliedAfter *time.Time `locationName:"maintenanceAutoAppliedAfter" type:"timestamp"`
+
+ // Indicates whether there is pending maintenance.
+ PendingMaintenance *string `locationName:"pendingMaintenance" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MaintenanceDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MaintenanceDetails) GoString() string {
+ return s.String()
+}
+
+// SetLastMaintenanceApplied sets the LastMaintenanceApplied field's value.
+func (s *MaintenanceDetails) SetLastMaintenanceApplied(v time.Time) *MaintenanceDetails {
+ s.LastMaintenanceApplied = &v
+ return s
+}
+
+// SetMaintenanceAutoAppliedAfter sets the MaintenanceAutoAppliedAfter field's value.
+func (s *MaintenanceDetails) SetMaintenanceAutoAppliedAfter(v time.Time) *MaintenanceDetails {
+ s.MaintenanceAutoAppliedAfter = &v
+ return s
+}
+
+// SetPendingMaintenance sets the PendingMaintenance field's value.
+func (s *MaintenanceDetails) SetPendingMaintenance(v string) *MaintenanceDetails {
+ s.PendingMaintenance = &v
+ return s
+}
+
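// Illustrative sketch (not part of the vendored SDK): MaintenanceDetails follows the same
// generated pattern as every other type in this file -- chainable Set* helpers plus a
// String() that masks members marked sensitive. The values below are placeholders.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	md := (&ec2.MaintenanceDetails{}).
		SetPendingMaintenance("available"). // placeholder status string
		SetMaintenanceAutoAppliedAfter(time.Now().Add(7 * 24 * time.Hour))

	fmt.Println(md) // prints the awsutil.Prettify form produced by String()
}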
// Describes a managed prefix list.
type ManagedPrefixList struct {
_ struct{} `type:"structure"`
@@ -130520,7 +139378,7 @@ func (s *MemoryMiBRequest) SetMin(v int64) *MemoryMiBRequest {
return s
}
-// Indicates whether the network was healthy or unhealthy at a particular point.
+// Indicates whether the network was healthy or degraded at a particular point.
// The value is aggregated from the startDate to the endDate. Currently only
// five_minutes is supported.
type MetricPoint struct {
@@ -131525,6 +140383,8 @@ type ModifyFleetInput struct {
// Indicates whether running instances should be terminated if the total target
// capacity of the EC2 Fleet is decreased below the current size of the EC2
// Fleet.
+ //
+ // Supported only for fleets of type maintain.
ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"`
// The ID of the EC2 Fleet.
@@ -131828,6 +140688,11 @@ type ModifyHostsInput struct {
// HostIds is a required field
HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"`
+ // Indicates whether to enable or disable host maintenance for the Dedicated
+ // Host. For more information, see Host maintenance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-maintenance.html)
+ // in the Amazon EC2 User Guide.
+ HostMaintenance *string `type:"string" enum:"HostMaintenance"`
+
// Indicates whether to enable or disable host recovery for the Dedicated Host.
// For more information, see Host recovery (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html)
// in the Amazon EC2 User Guide.
@@ -131895,6 +140760,12 @@ func (s *ModifyHostsInput) SetHostIds(v []*string) *ModifyHostsInput {
return s
}
+// SetHostMaintenance sets the HostMaintenance field's value.
+func (s *ModifyHostsInput) SetHostMaintenance(v string) *ModifyHostsInput {
+ s.HostMaintenance = &v
+ return s
+}
+
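// Illustrative sketch (not part of the vendored SDK): enabling host maintenance for a
// Dedicated Host through the new HostMaintenance field. The host ID is a placeholder and
// "on" is assumed to be a valid HostMaintenance enum value ("on" | "off").
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.ModifyHosts(&ec2.ModifyHostsInput{
		HostIds:         []*string{aws.String("h-0123456789abcdef0")},
		HostMaintenance: aws.String("on"),
	})
	if err != nil {
		log.Fatal(err)
	}
}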
// SetHostRecovery sets the HostRecovery field's value.
func (s *ModifyHostsInput) SetHostRecovery(v string) *ModifyHostsInput {
s.HostRecovery = &v
@@ -132158,7 +141029,7 @@ type ModifyImageAttributeInput struct {
// The name of the attribute to modify.
//
- // Valid values: description | launchPermission
+ // Valid values: description | imdsSupport | launchPermission
Attribute *string `type:"string"`
// A new description for the AMI.
@@ -132175,6 +141046,18 @@ type ModifyImageAttributeInput struct {
// ImageId is a required field
ImageId *string `type:"string" required:"true"`
+ // Set to v2.0 to indicate that IMDSv2 is specified in the AMI. Instances launched
+ // from this AMI will have HttpTokens automatically set to required so that,
+ // by default, the instance requires that IMDSv2 is used when requesting instance
+ // metadata. In addition, HttpPutResponseHopLimit is set to 2. For more information,
+ // see Configure the AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html#configure-IMDS-new-instances-ami-configuration)
+ // in the Amazon EC2 User Guide.
+ //
+ // Do not use this parameter unless your AMI software supports IMDSv2. After
+ // you set the value to v2.0, you can't undo it. The only way to “reset”
+ // your AMI is to create a new AMI from the underlying snapshot.
+ ImdsSupport *AttributeValue `type:"structure"`
+
// A new launch permission for the AMI.
LaunchPermission *LaunchPermissionModifications `type:"structure"`
@@ -132202,7 +141085,7 @@ type ModifyImageAttributeInput struct {
UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"`
// The value of the attribute being modified. This parameter can be used only
- // when the Attribute parameter is description.
+ // when the Attribute parameter is description or imdsSupport.
Value *string `type:"string"`
}
@@ -132261,6 +141144,12 @@ func (s *ModifyImageAttributeInput) SetImageId(v string) *ModifyImageAttributeIn
return s
}
+// SetImdsSupport sets the ImdsSupport field's value.
+func (s *ModifyImageAttributeInput) SetImdsSupport(v *AttributeValue) *ModifyImageAttributeInput {
+ s.ImdsSupport = v
+ return s
+}
+
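// Illustrative sketch (not part of the vendored SDK): marking an AMI as IMDSv2-only via
// the new ImdsSupport attribute described above. Per the doc comment this cannot be
// undone. The AMI ID is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
		ImageId:     aws.String("ami-0123456789abcdef0"),
		ImdsSupport: &ec2.AttributeValue{Value: aws.String("v2.0")},
	})
	if err != nil {
		log.Fatal(err)
	}
}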
// SetLaunchPermission sets the LaunchPermission field's value.
func (s *ModifyImageAttributeInput) SetLaunchPermission(v *LaunchPermissionModifications) *ModifyImageAttributeInput {
s.LaunchPermission = v
@@ -132743,6 +141632,16 @@ func (s *ModifyInstanceCreditSpecificationInput) Validate() error {
if s.InstanceCreditSpecifications == nil {
invalidParams.Add(request.NewErrParamRequired("InstanceCreditSpecifications"))
}
+ if s.InstanceCreditSpecifications != nil {
+ for i, v := range s.InstanceCreditSpecifications {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceCreditSpecifications", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
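// Illustrative sketch (not part of the vendored SDK): the nested validation added above
// means Validate() now rejects malformed InstanceCreditSpecifications entries client-side,
// before any request is sent. The InstanceCreditSpecificationRequest field names
// (InstanceId, CpuCredits) are assumptions not shown in this hunk; the instance ID is a
// placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	input := &ec2.ModifyInstanceCreditSpecificationInput{
		InstanceCreditSpecifications: []*ec2.InstanceCreditSpecificationRequest{{
			InstanceId: aws.String("i-0123456789abcdef0"),
			CpuCredits: aws.String("unlimited"),
		}},
	}
	if err := input.Validate(); err != nil {
		log.Fatalf("invalid request: %v", err) // nested errors are reported as InstanceCreditSpecifications[i]
	}

	svc := ec2.New(session.Must(session.NewSession()))
	if _, err := svc.ModifyInstanceCreditSpecification(input); err != nil {
		log.Fatal(err)
	}
}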
@@ -133196,19 +142095,22 @@ type ModifyInstanceMetadataOptionsInput struct {
// Possible values: Integers from 1 to 64
HttpPutResponseHopLimit *int64 `type:"integer"`
- // The state of token usage for your instance metadata requests. If the parameter
- // is not specified in the request, the default state is optional.
+ // IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional
+ // (in other words, set the use of IMDSv2 to optional) or required (in other
+ // words, set the use of IMDSv2 to required).
//
- // If the state is optional, you can choose to retrieve instance metadata with
- // or without a session token on your request. If you retrieve the IAM role
- // credentials without a token, the version 1.0 role credentials are returned.
- // If you retrieve the IAM role credentials using a valid session token, the
- // version 2.0 role credentials are returned.
+ // * optional - When IMDSv2 is optional, you can choose to retrieve instance
+ // metadata with or without a session token in your request. If you retrieve
+ // the IAM role credentials without a token, the IMDSv1 role credentials
+ // are returned. If you retrieve the IAM role credentials using a valid session
+ // token, the IMDSv2 role credentials are returned.
//
- // If the state is required, you must send a session token with any instance
- // metadata retrieval requests. In this state, retrieving the IAM role credentials
- // always returns the version 2.0 credentials; the version 1.0 credentials are
- // not available.
+ // * required - When IMDSv2 is required, you must send a session token with
+ // any instance metadata retrieval requests. In this state, retrieving the
+ // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials
+ // are not available.
+ //
+ // Default: optional
HttpTokens *string `type:"string" enum:"HttpTokensState"`
// The ID of the instance.
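// Illustrative sketch (not part of the vendored SDK): requiring IMDSv2 on a running
// instance, following the HttpTokens documentation above. The instance ID is a placeholder
// and the hop limit of 2 simply mirrors the AMI-level guidance earlier in this file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.ModifyInstanceMetadataOptions(&ec2.ModifyInstanceMetadataOptionsInput{
		InstanceId:              aws.String("i-0123456789abcdef0"),
		HttpTokens:              aws.String("required"), // IMDSv1 credentials become unavailable
		HttpPutResponseHopLimit: aws.Int64(2),
	})
	if err != nil {
		log.Fatal(err)
	}
}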
@@ -133948,6 +142850,125 @@ func (s *ModifyIpamResourceCidrOutput) SetIpamResourceCidr(v *IpamResourceCidr)
return s
}
+type ModifyIpamResourceDiscoveryInput struct {
+ _ struct{} `type:"structure"`
+
+ // Add operating Regions to the resource discovery. Operating Regions are Amazon
+ // Web Services Regions where the IPAM is allowed to manage IP address CIDRs.
+ // IPAM only discovers and monitors resources in the Amazon Web Services Regions
+ // you select as operating Regions.
+ AddOperatingRegions []*AddIpamOperatingRegion `locationName:"AddOperatingRegion" type:"list"`
+
+ // A resource discovery description.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // A resource discovery ID.
+ //
+ // IpamResourceDiscoveryId is a required field
+ IpamResourceDiscoveryId *string `type:"string" required:"true"`
+
+ // Remove operating Regions.
+ RemoveOperatingRegions []*RemoveIpamOperatingRegion `locationName:"RemoveOperatingRegion" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyIpamResourceDiscoveryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyIpamResourceDiscoveryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyIpamResourceDiscoveryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyIpamResourceDiscoveryInput"}
+ if s.IpamResourceDiscoveryId == nil {
+ invalidParams.Add(request.NewErrParamRequired("IpamResourceDiscoveryId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAddOperatingRegions sets the AddOperatingRegions field's value.
+func (s *ModifyIpamResourceDiscoveryInput) SetAddOperatingRegions(v []*AddIpamOperatingRegion) *ModifyIpamResourceDiscoveryInput {
+ s.AddOperatingRegions = v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ModifyIpamResourceDiscoveryInput) SetDescription(v string) *ModifyIpamResourceDiscoveryInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyIpamResourceDiscoveryInput) SetDryRun(v bool) *ModifyIpamResourceDiscoveryInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetIpamResourceDiscoveryId sets the IpamResourceDiscoveryId field's value.
+func (s *ModifyIpamResourceDiscoveryInput) SetIpamResourceDiscoveryId(v string) *ModifyIpamResourceDiscoveryInput {
+ s.IpamResourceDiscoveryId = &v
+ return s
+}
+
+// SetRemoveOperatingRegions sets the RemoveOperatingRegions field's value.
+func (s *ModifyIpamResourceDiscoveryInput) SetRemoveOperatingRegions(v []*RemoveIpamOperatingRegion) *ModifyIpamResourceDiscoveryInput {
+ s.RemoveOperatingRegions = v
+ return s
+}
+
+type ModifyIpamResourceDiscoveryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A resource discovery.
+ IpamResourceDiscovery *IpamResourceDiscovery `locationName:"ipamResourceDiscovery" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyIpamResourceDiscoveryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyIpamResourceDiscoveryOutput) GoString() string {
+ return s.String()
+}
+
+// SetIpamResourceDiscovery sets the IpamResourceDiscovery field's value.
+func (s *ModifyIpamResourceDiscoveryOutput) SetIpamResourceDiscovery(v *IpamResourceDiscovery) *ModifyIpamResourceDiscoveryOutput {
+ s.IpamResourceDiscovery = v
+ return s
+}
+
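// Illustrative sketch (not part of the vendored SDK): adding an operating Region to an
// existing IPAM resource discovery. The discovery ID is a placeholder, and the
// AddIpamOperatingRegion.RegionName field is assumed from the related IPAM types rather
// than shown in this hunk.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyIpamResourceDiscovery(&ec2.ModifyIpamResourceDiscoveryInput{
		IpamResourceDiscoveryId: aws.String("ipam-res-disco-0123456789abcdef0"),
		AddOperatingRegions: []*ec2.AddIpamOperatingRegion{
			{RegionName: aws.String("eu-west-1")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.IpamResourceDiscovery)
}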
type ModifyIpamScopeInput struct {
_ struct{} `type:"structure"`
@@ -134174,9 +143195,12 @@ type ModifyLocalGatewayRouteInput struct {
// The CIDR block used for destination matches. The value that you provide must
// match the CIDR of an existing route in the table.
- //
- // DestinationCidrBlock is a required field
- DestinationCidrBlock *string `type:"string" required:"true"`
+ DestinationCidrBlock *string `type:"string"`
+
+ // The ID of the prefix list. Use a prefix list in place of DestinationCidrBlock.
+ // You cannot use DestinationPrefixListId and DestinationCidrBlock in the same
+ // request.
+ DestinationPrefixListId *string `type:"string"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@@ -134217,9 +143241,6 @@ func (s ModifyLocalGatewayRouteInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyLocalGatewayRouteInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyLocalGatewayRouteInput"}
- if s.DestinationCidrBlock == nil {
- invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
- }
if s.LocalGatewayRouteTableId == nil {
invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId"))
}
@@ -134236,6 +143257,12 @@ func (s *ModifyLocalGatewayRouteInput) SetDestinationCidrBlock(v string) *Modify
return s
}
+// SetDestinationPrefixListId sets the DestinationPrefixListId field's value.
+func (s *ModifyLocalGatewayRouteInput) SetDestinationPrefixListId(v string) *ModifyLocalGatewayRouteInput {
+ s.DestinationPrefixListId = &v
+ return s
+}
+
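// Illustrative sketch (not part of the vendored SDK): with DestinationCidrBlock now
// optional, a local gateway route can be addressed by prefix list instead. Both IDs are
// placeholders; a real call would also specify the new route target (for example a
// network interface ID), which is omitted here.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.ModifyLocalGatewayRoute(&ec2.ModifyLocalGatewayRouteInput{
		DestinationPrefixListId:  aws.String("pl-0123456789abcdef0"),
		LocalGatewayRouteTableId: aws.String("lgw-rtb-0123456789abcdef0"),
	})
	if err != nil {
		log.Fatal(err)
	}
}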
// SetDryRun sets the DryRun field's value.
func (s *ModifyLocalGatewayRouteInput) SetDryRun(v bool) *ModifyLocalGatewayRouteInput {
s.DryRun = &v
@@ -134605,7 +143632,9 @@ type ModifyPrivateDnsNameOptionsInput struct {
EnableResourceNameDnsARecord *bool `type:"boolean"`
// The ID of the instance.
- InstanceId *string `type:"string"`
+ //
+ // InstanceId is a required field
+ InstanceId *string `type:"string" required:"true"`
// The type of hostname for EC2 instances. For IPv4 only subnets, an instance
// DNS name must be based on the instance IPv4 address. For IPv6 only subnets,
@@ -134633,6 +143662,19 @@ func (s ModifyPrivateDnsNameOptionsInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyPrivateDnsNameOptionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyPrivateDnsNameOptionsInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
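// Illustrative sketch (not part of the vendored SDK): InstanceId is now required, so the
// Validate() added above catches a missing ID before any request is made. The instance ID
// is a placeholder, and the PrivateDnsHostnameType field and its value are assumptions
// not shown in this hunk.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	input := &ec2.ModifyPrivateDnsNameOptionsInput{
		InstanceId:                   aws.String("i-0123456789abcdef0"),
		EnableResourceNameDnsARecord: aws.Bool(true),
		PrivateDnsHostnameType:       aws.String("resource-name"), // assumed enum value
	}
	if err := input.Validate(); err != nil {
		log.Fatalf("invalid request: %v", err)
	}

	svc := ec2.New(session.Must(session.NewSession()))
	if _, err := svc.ModifyPrivateDnsNameOptions(input); err != nil {
		log.Fatal(err)
	}
}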
// SetDryRun sets the DryRun field's value.
func (s *ModifyPrivateDnsNameOptionsInput) SetDryRun(v bool) *ModifyPrivateDnsNameOptionsInput {
s.DryRun = &v
@@ -134844,6 +143886,16 @@ func (s *ModifySecurityGroupRulesInput) Validate() error {
if s.SecurityGroupRules == nil {
invalidParams.Add(request.NewErrParamRequired("SecurityGroupRules"))
}
+ if s.SecurityGroupRules != nil {
+ for i, v := range s.SecurityGroupRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecurityGroupRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -135140,9 +144192,11 @@ type ModifySpotFleetRequestInput struct {
// Reserved.
Context *string `type:"string"`
- // Indicates whether running Spot Instances should be terminated if the target
- // capacity of the Spot Fleet request is decreased below the current size of
- // the Spot Fleet.
+ // Indicates whether running instances should be terminated if the target capacity
+ // of the Spot Fleet request is decreased below the current size of the Spot
+ // Fleet.
+ //
+ // Supported only for fleets of type maintain.
ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
// The launch template and overrides. You can only use this parameter if you
@@ -136430,6 +145484,1033 @@ func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v strin
return s
}
+// Options for a network-interface type Verified Access endpoint.
+type ModifyVerifiedAccessEndpointEniOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The IP port number.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `type:"string" enum:"VerifiedAccessEndpointProtocol"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointEniOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointEniOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessEndpointEniOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessEndpointEniOptions"}
+ if s.Port != nil && *s.Port < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPort sets the Port field's value.
+func (s *ModifyVerifiedAccessEndpointEniOptions) SetPort(v int64) *ModifyVerifiedAccessEndpointEniOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *ModifyVerifiedAccessEndpointEniOptions) SetProtocol(v string) *ModifyVerifiedAccessEndpointEniOptions {
+ s.Protocol = &v
+ return s
+}
+
+type ModifyVerifiedAccessEndpointInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access endpoint.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The load balancer details if creating the Amazon Web Services Verified Access
+ // endpoint using the load-balancer type.
+ LoadBalancerOptions *ModifyVerifiedAccessEndpointLoadBalancerOptions `type:"structure"`
+
+ // The network interface options.
+ NetworkInterfaceOptions *ModifyVerifiedAccessEndpointEniOptions `type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ //
+ // VerifiedAccessEndpointId is a required field
+ VerifiedAccessEndpointId *string `type:"string" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ VerifiedAccessGroupId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessEndpointInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessEndpointInput"}
+ if s.VerifiedAccessEndpointId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessEndpointId"))
+ }
+ if s.LoadBalancerOptions != nil {
+ if err := s.LoadBalancerOptions.Validate(); err != nil {
+ invalidParams.AddNested("LoadBalancerOptions", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.NetworkInterfaceOptions != nil {
+ if err := s.NetworkInterfaceOptions.Validate(); err != nil {
+ invalidParams.AddNested("NetworkInterfaceOptions", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetClientToken(v string) *ModifyVerifiedAccessEndpointInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetDescription(v string) *ModifyVerifiedAccessEndpointInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetDryRun(v bool) *ModifyVerifiedAccessEndpointInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetLoadBalancerOptions sets the LoadBalancerOptions field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetLoadBalancerOptions(v *ModifyVerifiedAccessEndpointLoadBalancerOptions) *ModifyVerifiedAccessEndpointInput {
+ s.LoadBalancerOptions = v
+ return s
+}
+
+// SetNetworkInterfaceOptions sets the NetworkInterfaceOptions field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetNetworkInterfaceOptions(v *ModifyVerifiedAccessEndpointEniOptions) *ModifyVerifiedAccessEndpointInput {
+ s.NetworkInterfaceOptions = v
+ return s
+}
+
+// SetVerifiedAccessEndpointId sets the VerifiedAccessEndpointId field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetVerifiedAccessEndpointId(v string) *ModifyVerifiedAccessEndpointInput {
+ s.VerifiedAccessEndpointId = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *ModifyVerifiedAccessEndpointInput) SetVerifiedAccessGroupId(v string) *ModifyVerifiedAccessEndpointInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// Describes a load balancer when creating an Amazon Web Services Verified Access
+// endpoint using the load-balancer type.
+type ModifyVerifiedAccessEndpointLoadBalancerOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The IP port number.
+ Port *int64 `min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `type:"string" enum:"VerifiedAccessEndpointProtocol"`
+
+ // The IDs of the subnets.
+ SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointLoadBalancerOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointLoadBalancerOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessEndpointLoadBalancerOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessEndpointLoadBalancerOptions"}
+ if s.Port != nil && *s.Port < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Port", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPort sets the Port field's value.
+func (s *ModifyVerifiedAccessEndpointLoadBalancerOptions) SetPort(v int64) *ModifyVerifiedAccessEndpointLoadBalancerOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *ModifyVerifiedAccessEndpointLoadBalancerOptions) SetProtocol(v string) *ModifyVerifiedAccessEndpointLoadBalancerOptions {
+ s.Protocol = &v
+ return s
+}
+
+// SetSubnetIds sets the SubnetIds field's value.
+func (s *ModifyVerifiedAccessEndpointLoadBalancerOptions) SetSubnetIds(v []*string) *ModifyVerifiedAccessEndpointLoadBalancerOptions {
+ s.SubnetIds = v
+ return s
+}
+
+type ModifyVerifiedAccessEndpointOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access endpoint details.
+ VerifiedAccessEndpoint *VerifiedAccessEndpoint `locationName:"verifiedAccessEndpoint" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessEndpoint sets the VerifiedAccessEndpoint field's value.
+func (s *ModifyVerifiedAccessEndpointOutput) SetVerifiedAccessEndpoint(v *VerifiedAccessEndpoint) *ModifyVerifiedAccessEndpointOutput {
+ s.VerifiedAccessEndpoint = v
+ return s
+}
+
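// Illustrative sketch (not part of the vendored SDK): updating the listener settings of a
// load-balancer type Verified Access endpoint with the options defined above. All IDs are
// placeholders and "https" is assumed to be a valid VerifiedAccessEndpointProtocol value.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyVerifiedAccessEndpoint(&ec2.ModifyVerifiedAccessEndpointInput{
		VerifiedAccessEndpointId: aws.String("vae-0123456789abcdef0"),
		LoadBalancerOptions: &ec2.ModifyVerifiedAccessEndpointLoadBalancerOptions{
			Port:      aws.Int64(443),
			Protocol:  aws.String("https"),
			SubnetIds: []*string{aws.String("subnet-0123456789abcdef0")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out.VerifiedAccessEndpoint)
}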
+type ModifyVerifiedAccessEndpointPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `type:"string"`
+
+ // The status of the Verified Access policy.
+ //
+ // PolicyEnabled is a required field
+ PolicyEnabled *bool `type:"boolean" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ //
+ // VerifiedAccessEndpointId is a required field
+ VerifiedAccessEndpointId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessEndpointPolicyInput"}
+ if s.PolicyEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyEnabled"))
+ }
+ if s.VerifiedAccessEndpointId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessEndpointId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) SetClientToken(v string) *ModifyVerifiedAccessEndpointPolicyInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) SetDryRun(v bool) *ModifyVerifiedAccessEndpointPolicyInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) SetPolicyDocument(v string) *ModifyVerifiedAccessEndpointPolicyInput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) SetPolicyEnabled(v bool) *ModifyVerifiedAccessEndpointPolicyInput {
+ s.PolicyEnabled = &v
+ return s
+}
+
+// SetVerifiedAccessEndpointId sets the VerifiedAccessEndpointId field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyInput) SetVerifiedAccessEndpointId(v string) *ModifyVerifiedAccessEndpointPolicyInput {
+ s.VerifiedAccessEndpointId = &v
+ return s
+}
+
+type ModifyVerifiedAccessEndpointPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The status of the Verified Access policy.
+ PolicyEnabled *bool `locationName:"policyEnabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessEndpointPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyOutput) SetPolicyDocument(v string) *ModifyVerifiedAccessEndpointPolicyOutput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *ModifyVerifiedAccessEndpointPolicyOutput) SetPolicyEnabled(v bool) *ModifyVerifiedAccessEndpointPolicyOutput {
+ s.PolicyEnabled = &v
+ return s
+}
+
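// Illustrative sketch (not part of the vendored SDK): enabling a policy on a Verified
// Access endpoint. The endpoint ID is a placeholder and the policy document is a made-up
// example, not a verified policy.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	const policy = `permit(principal, action, resource) when { context.http_request.method == "GET" };`

	out, err := svc.ModifyVerifiedAccessEndpointPolicy(&ec2.ModifyVerifiedAccessEndpointPolicyInput{
		VerifiedAccessEndpointId: aws.String("vae-0123456789abcdef0"),
		PolicyEnabled:            aws.Bool(true),
		PolicyDocument:           aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("policy enabled: %v", aws.BoolValue(out.PolicyEnabled))
}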
+type ModifyVerifiedAccessGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access group.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ //
+ // VerifiedAccessGroupId is a required field
+ VerifiedAccessGroupId *string `type:"string" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessGroupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessGroupInput"}
+ if s.VerifiedAccessGroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessGroupId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessGroupInput) SetClientToken(v string) *ModifyVerifiedAccessGroupInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ModifyVerifiedAccessGroupInput) SetDescription(v string) *ModifyVerifiedAccessGroupInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessGroupInput) SetDryRun(v bool) *ModifyVerifiedAccessGroupInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *ModifyVerifiedAccessGroupInput) SetVerifiedAccessGroupId(v string) *ModifyVerifiedAccessGroupInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *ModifyVerifiedAccessGroupInput) SetVerifiedAccessInstanceId(v string) *ModifyVerifiedAccessGroupInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type ModifyVerifiedAccessGroupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Details of the Amazon Web Services Verified Access group.
+ VerifiedAccessGroup *VerifiedAccessGroup `locationName:"verifiedAccessGroup" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessGroup sets the VerifiedAccessGroup field's value.
+func (s *ModifyVerifiedAccessGroupOutput) SetVerifiedAccessGroup(v *VerifiedAccessGroup) *ModifyVerifiedAccessGroupOutput {
+ s.VerifiedAccessGroup = v
+ return s
+}
+
+type ModifyVerifiedAccessGroupPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `type:"string"`
+
+ // The status of the Verified Access policy.
+ //
+ // PolicyEnabled is a required field
+ PolicyEnabled *bool `type:"boolean" required:"true"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ //
+ // VerifiedAccessGroupId is a required field
+ VerifiedAccessGroupId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessGroupPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessGroupPolicyInput"}
+ if s.PolicyEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyEnabled"))
+ }
+ if s.VerifiedAccessGroupId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessGroupId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessGroupPolicyInput) SetClientToken(v string) *ModifyVerifiedAccessGroupPolicyInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessGroupPolicyInput) SetDryRun(v bool) *ModifyVerifiedAccessGroupPolicyInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *ModifyVerifiedAccessGroupPolicyInput) SetPolicyDocument(v string) *ModifyVerifiedAccessGroupPolicyInput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *ModifyVerifiedAccessGroupPolicyInput) SetPolicyEnabled(v bool) *ModifyVerifiedAccessGroupPolicyInput {
+ s.PolicyEnabled = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *ModifyVerifiedAccessGroupPolicyInput) SetVerifiedAccessGroupId(v string) *ModifyVerifiedAccessGroupPolicyInput {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+type ModifyVerifiedAccessGroupPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Web Services Verified Access policy document.
+ PolicyDocument *string `locationName:"policyDocument" type:"string"`
+
+ // The status of the Verified Access policy.
+ PolicyEnabled *bool `locationName:"policyEnabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessGroupPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *ModifyVerifiedAccessGroupPolicyOutput) SetPolicyDocument(v string) *ModifyVerifiedAccessGroupPolicyOutput {
+ s.PolicyDocument = &v
+ return s
+}
+
+// SetPolicyEnabled sets the PolicyEnabled field's value.
+func (s *ModifyVerifiedAccessGroupPolicyOutput) SetPolicyEnabled(v bool) *ModifyVerifiedAccessGroupPolicyOutput {
+ s.PolicyEnabled = &v
+ return s
+}
+
+type ModifyVerifiedAccessInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access instance.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessInstanceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessInstanceInput"}
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessInstanceInput) SetClientToken(v string) *ModifyVerifiedAccessInstanceInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ModifyVerifiedAccessInstanceInput) SetDescription(v string) *ModifyVerifiedAccessInstanceInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessInstanceInput) SetDryRun(v bool) *ModifyVerifiedAccessInstanceInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *ModifyVerifiedAccessInstanceInput) SetVerifiedAccessInstanceId(v string) *ModifyVerifiedAccessInstanceInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type ModifyVerifiedAccessInstanceLoggingConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The configuration options for Amazon Web Services Verified Access instances.
+ //
+ // AccessLogs is a required field
+ AccessLogs *VerifiedAccessLogOptions `type:"structure" required:"true"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ //
+ // VerifiedAccessInstanceId is a required field
+ VerifiedAccessInstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceLoggingConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceLoggingConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessInstanceLoggingConfigurationInput"}
+ if s.AccessLogs == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessLogs"))
+ }
+ if s.VerifiedAccessInstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessInstanceId"))
+ }
+ if s.AccessLogs != nil {
+ if err := s.AccessLogs.Validate(); err != nil {
+ invalidParams.AddNested("AccessLogs", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessLogs sets the AccessLogs field's value.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) SetAccessLogs(v *VerifiedAccessLogOptions) *ModifyVerifiedAccessInstanceLoggingConfigurationInput {
+ s.AccessLogs = v
+ return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) SetClientToken(v string) *ModifyVerifiedAccessInstanceLoggingConfigurationInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) SetDryRun(v bool) *ModifyVerifiedAccessInstanceLoggingConfigurationInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationInput) SetVerifiedAccessInstanceId(v string) *ModifyVerifiedAccessInstanceLoggingConfigurationInput {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+type ModifyVerifiedAccessInstanceLoggingConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The logging configuration for the Amazon Web Services Verified Access instance.
+ LoggingConfiguration *VerifiedAccessInstanceLoggingConfiguration `locationName:"loggingConfiguration" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceLoggingConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceLoggingConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoggingConfiguration sets the LoggingConfiguration field's value.
+func (s *ModifyVerifiedAccessInstanceLoggingConfigurationOutput) SetLoggingConfiguration(v *VerifiedAccessInstanceLoggingConfiguration) *ModifyVerifiedAccessInstanceLoggingConfigurationOutput {
+ s.LoggingConfiguration = v
+ return s
+}
+
+type ModifyVerifiedAccessInstanceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Details of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstance *VerifiedAccessInstance `locationName:"verifiedAccessInstance" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessInstanceOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessInstance sets the VerifiedAccessInstance field's value.
+func (s *ModifyVerifiedAccessInstanceOutput) SetVerifiedAccessInstance(v *VerifiedAccessInstance) *ModifyVerifiedAccessInstanceOutput {
+ s.VerifiedAccessInstance = v
+ return s
+}
+
+type ModifyVerifiedAccessTrustProviderInput struct {
+ _ struct{} `type:"structure"`
+
+ // A unique, case-sensitive token that you provide to ensure idempotency of
+ // your modification request. For more information, see Ensuring Idempotency
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // A description for the Amazon Web Services Verified Access trust provider.
+ Description *string `type:"string"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The OpenID Connect details for an oidc-type, user-identity based trust provider.
+ OidcOptions *ModifyVerifiedAccessTrustProviderOidcOptions `type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ //
+ // VerifiedAccessTrustProviderId is a required field
+ VerifiedAccessTrustProviderId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyVerifiedAccessTrustProviderInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModifyVerifiedAccessTrustProviderInput"}
+ if s.VerifiedAccessTrustProviderId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VerifiedAccessTrustProviderId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ModifyVerifiedAccessTrustProviderInput) SetClientToken(v string) *ModifyVerifiedAccessTrustProviderInput {
+ s.ClientToken = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ModifyVerifiedAccessTrustProviderInput) SetDescription(v string) *ModifyVerifiedAccessTrustProviderInput {
+ s.Description = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyVerifiedAccessTrustProviderInput) SetDryRun(v bool) *ModifyVerifiedAccessTrustProviderInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetOidcOptions sets the OidcOptions field's value.
+func (s *ModifyVerifiedAccessTrustProviderInput) SetOidcOptions(v *ModifyVerifiedAccessTrustProviderOidcOptions) *ModifyVerifiedAccessTrustProviderInput {
+ s.OidcOptions = v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *ModifyVerifiedAccessTrustProviderInput) SetVerifiedAccessTrustProviderId(v string) *ModifyVerifiedAccessTrustProviderInput {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
+// OpenID Connect options for an oidc-type, user-identity based trust provider.
+type ModifyVerifiedAccessTrustProviderOidcOptions struct {
+ _ struct{} `type:"structure"`
+
+ // OpenID Connect (OIDC) scopes are used by an application during authentication
+ // to authorize access to a user's details. Each scope returns a specific set
+ // of user attributes.
+ Scope *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderOidcOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderOidcOptions) GoString() string {
+ return s.String()
+}
+
+// SetScope sets the Scope field's value.
+func (s *ModifyVerifiedAccessTrustProviderOidcOptions) SetScope(v string) *ModifyVerifiedAccessTrustProviderOidcOptions {
+ s.Scope = &v
+ return s
+}
+
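// Illustrative sketch (not part of the vendored SDK): updating the OIDC scopes requested
// by a user-identity trust provider via the options defined above. The trust provider ID
// is a placeholder and the scope string is only an example.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.ModifyVerifiedAccessTrustProvider(&ec2.ModifyVerifiedAccessTrustProviderInput{
		VerifiedAccessTrustProviderId: aws.String("vatp-0123456789abcdef0"),
		OidcOptions: &ec2.ModifyVerifiedAccessTrustProviderOidcOptions{
			Scope: aws.String("openid profile email"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}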
+type ModifyVerifiedAccessTrustProviderOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Details of the Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProvider *VerifiedAccessTrustProvider `locationName:"verifiedAccessTrustProvider" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ModifyVerifiedAccessTrustProviderOutput) GoString() string {
+ return s.String()
+}
+
+// SetVerifiedAccessTrustProvider sets the VerifiedAccessTrustProvider field's value.
+func (s *ModifyVerifiedAccessTrustProviderOutput) SetVerifiedAccessTrustProvider(v *VerifiedAccessTrustProvider) *ModifyVerifiedAccessTrustProviderOutput {
+ s.VerifiedAccessTrustProvider = v
+ return s
+}
+
type ModifyVolumeAttributeInput struct {
_ struct{} `type:"structure"`
@@ -136802,8 +146883,8 @@ func (s ModifyVpcAttributeOutput) GoString() string {
type ModifyVpcEndpointConnectionNotificationInput struct {
_ struct{} `type:"structure"`
- // One or more events for the endpoint. Valid values are Accept, Connect, Delete,
- // and Reject.
+ // The events for the endpoint. Valid values are Accept, Connect, Delete, and
+ // Reject.
ConnectionEvents []*string `locationNameList:"item" type:"list"`
// The ARN for the SNS topic for the notification.
@@ -136907,18 +146988,17 @@ func (s *ModifyVpcEndpointConnectionNotificationOutput) SetReturnValue(v bool) *
return s
}
-// Contains the parameters for ModifyVpcEndpoint.
type ModifyVpcEndpointInput struct {
_ struct{} `type:"structure"`
- // (Gateway endpoint) One or more route tables IDs to associate with the endpoint.
+ // (Gateway endpoint) The IDs of the route tables to associate with the endpoint.
AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"`
- // (Interface endpoint) One or more security group IDs to associate with the
+ // (Interface endpoint) The IDs of the security groups to associate with the
// network interface.
AddSecurityGroupIds []*string `locationName:"AddSecurityGroupId" locationNameList:"item" type:"list"`
- // (Interface and Gateway Load Balancer endpoints) One or more subnet IDs in
+ // (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in
// which to serve the endpoint. For a Gateway Load Balancer endpoint, you can
// specify only one subnet.
AddSubnetIds []*string `locationName:"AddSubnetId" locationNameList:"item" type:"list"`
@@ -136943,14 +147023,14 @@ type ModifyVpcEndpointInput struct {
// with the VPC.
PrivateDnsEnabled *bool `type:"boolean"`
- // (Gateway endpoint) One or more route table IDs to disassociate from the endpoint.
+ // (Gateway endpoint) The IDs of the route tables to disassociate from the endpoint.
RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"`
- // (Interface endpoint) One or more security group IDs to disassociate from
+ // (Interface endpoint) The IDs of the security groups to disassociate from
// the network interface.
RemoveSecurityGroupIds []*string `locationName:"RemoveSecurityGroupId" locationNameList:"item" type:"list"`
- // (Interface endpoint) One or more subnets IDs in which to remove the endpoint.
+ // (Interface endpoint) The IDs of the subnets from which to remove the endpoint.
RemoveSubnetIds []*string `locationName:"RemoveSubnetId" locationNameList:"item" type:"list"`
// (Gateway endpoint) Specify true to reset the policy document to the default
@@ -137388,9 +147468,9 @@ func (s *ModifyVpcEndpointServicePayerResponsibilityOutput) SetReturnValue(v boo
type ModifyVpcEndpointServicePermissionsInput struct {
_ struct{} `type:"structure"`
- // The Amazon Resource Names (ARN) of one or more principals. Permissions are
- // granted to the principals in this list. To grant permissions to all principals,
- // specify an asterisk (*).
+ // The Amazon Resource Names (ARN) of the principals. Permissions are granted
+ // to the principals in this list. To grant permissions to all principals, specify
+ // an asterisk (*).
AddAllowedPrincipals []*string `locationNameList:"item" type:"list"`
// Checks whether you have the required permissions for the action, without
@@ -137399,8 +147479,8 @@ type ModifyVpcEndpointServicePermissionsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
- // The Amazon Resource Names (ARN) of one or more principals. Permissions are
- // revoked for principals in this list.
+ // The Amazon Resource Names (ARN) of the principals. Permissions are revoked
+ // for principals in this list.
RemoveAllowedPrincipals []*string `locationNameList:"item" type:"list"`
// The ID of the service.
@@ -138085,6 +148165,11 @@ type ModifyVpnTunnelOptionsInput struct {
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
+ // Choose whether to trigger immediate tunnel replacement.
+ //
+ // Valid values: True | False
+ SkipTunnelReplacement *bool `type:"boolean"`
+
// The tunnel options to modify.
//
// TunnelOptions is a required field
@@ -138144,6 +148229,12 @@ func (s *ModifyVpnTunnelOptionsInput) SetDryRun(v bool) *ModifyVpnTunnelOptionsI
return s
}
+// SetSkipTunnelReplacement sets the SkipTunnelReplacement field's value.
+func (s *ModifyVpnTunnelOptionsInput) SetSkipTunnelReplacement(v bool) *ModifyVpnTunnelOptionsInput {
+ s.SkipTunnelReplacement = &v
+ return s
+}
+
// SetTunnelOptions sets the TunnelOptions field's value.
func (s *ModifyVpnTunnelOptionsInput) SetTunnelOptions(v *ModifyVpnTunnelOptionsSpecification) *ModifyVpnTunnelOptionsInput {
s.TunnelOptions = v
@@ -138212,6 +148303,9 @@ type ModifyVpnTunnelOptionsSpecification struct {
// Default: 30
DPDTimeoutSeconds *int64 `type:"integer"`
+ // Turn on or off tunnel endpoint lifecycle control feature.
+ EnableTunnelLifecycleControl *bool `type:"boolean"`
+
// The IKE versions that are permitted for the VPN tunnel.
//
// Valid values: ikev1 | ikev2
@@ -138373,6 +148467,12 @@ func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *Mod
return s
}
+// SetEnableTunnelLifecycleControl sets the EnableTunnelLifecycleControl field's value.
+func (s *ModifyVpnTunnelOptionsSpecification) SetEnableTunnelLifecycleControl(v bool) *ModifyVpnTunnelOptionsSpecification {
+ s.EnableTunnelLifecycleControl = &v
+ return s
+}
+
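+// Illustrative sketch (not part of the generated code): the new
+// EnableTunnelLifecycleControl option is set on the tunnel options
+// specification, while SkipTunnelReplacement is set on the request itself,
+// so a tunnel can be modified without triggering an immediate endpoint
+// replacement. The remaining required request fields are omitted here.
+//
+//	spec := &ModifyVpnTunnelOptionsSpecification{}
+//	spec.SetEnableTunnelLifecycleControl(true)
+//
+//	input := &ModifyVpnTunnelOptionsInput{}
+//	input.SetTunnelOptions(spec).
+//		SetSkipTunnelReplacement(true)
+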
// SetIKEVersions sets the IKEVersions field's value.
func (s *ModifyVpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *ModifyVpnTunnelOptionsSpecification {
s.IKEVersions = v
@@ -139037,6 +149137,16 @@ type NatGatewayAddress struct {
// associated with the NAT gateway.
AllocationId *string `locationName:"allocationId" type:"string"`
+ // [Public NAT gateway only] The association ID of the Elastic IP address that's
+ // associated with the NAT gateway.
+ AssociationId *string `locationName:"associationId" type:"string"`
+
+ // The address failure message.
+ FailureMessage *string `locationName:"failureMessage" type:"string"`
+
+ // Indicates whether the IP address is the primary address.
+ IsPrimary *bool `locationName:"isPrimary" type:"boolean"`
+
// The ID of the network interface associated with the NAT gateway.
NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
@@ -139046,6 +149156,9 @@ type NatGatewayAddress struct {
// [Public NAT gateway only] The Elastic IP address associated with the NAT
// gateway.
PublicIp *string `locationName:"publicIp" type:"string"`
+
+ // The address status.
+ Status *string `locationName:"status" type:"string" enum:"NatGatewayAddressStatus"`
}
// String returns the string representation.
@@ -139072,6 +149185,24 @@ func (s *NatGatewayAddress) SetAllocationId(v string) *NatGatewayAddress {
return s
}
+// SetAssociationId sets the AssociationId field's value.
+func (s *NatGatewayAddress) SetAssociationId(v string) *NatGatewayAddress {
+ s.AssociationId = &v
+ return s
+}
+
+// SetFailureMessage sets the FailureMessage field's value.
+func (s *NatGatewayAddress) SetFailureMessage(v string) *NatGatewayAddress {
+ s.FailureMessage = &v
+ return s
+}
+
+// SetIsPrimary sets the IsPrimary field's value.
+func (s *NatGatewayAddress) SetIsPrimary(v bool) *NatGatewayAddress {
+ s.IsPrimary = &v
+ return s
+}
+
// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *NatGatewayAddress) SetNetworkInterfaceId(v string) *NatGatewayAddress {
s.NetworkInterfaceId = &v
@@ -139090,6 +149221,12 @@ func (s *NatGatewayAddress) SetPublicIp(v string) *NatGatewayAddress {
return s
}
+// SetStatus sets the Status field's value.
+func (s *NatGatewayAddress) SetStatus(v string) *NatGatewayAddress {
+ s.Status = &v
+ return s
+}
+
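+// Illustrative sketch (not part of the generated code): the new fields added
+// to NatGatewayAddress can be inspected on each address of a described NAT
+// gateway, for example to find the primary address and its status. The addr
+// variable below stands for a *NatGatewayAddress obtained elsewhere.
+//
+//	if addr.IsPrimary != nil && *addr.IsPrimary {
+//		if addr.Status != nil {
+//			// Status is one of the NatGatewayAddressStatus enum values.
+//			_ = *addr.Status
+//		}
+//	}
+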
// Describes a network ACL.
type NetworkAcl struct {
_ struct{} `type:"structure"`
@@ -139864,6 +150001,7 @@ func (s *NetworkInsightsAccessScopeContent) SetNetworkInsightsAccessScopeId(v st
type NetworkInsightsAnalysis struct {
_ struct{} `type:"structure"`
+ // The member accounts that contain resources that the path can traverse.
AdditionalAccounts []*string `locationName:"additionalAccountSet" locationNameList:"item" type:"list"`
// Potential intermediate components.
@@ -139873,8 +150011,7 @@ type NetworkInsightsAnalysis struct {
// codes (https://docs.aws.amazon.com/vpc/latest/reachability/explanation-codes.html).
Explanations []*Explanation `locationName:"explanationSet" locationNameList:"item" type:"list"`
- // The Amazon Resource Names (ARN) of the Amazon Web Services resources that
- // the path must traverse.
+ // The Amazon Resource Names (ARN) of the resources that the path must traverse.
FilterInArns []*string `locationName:"filterInArnSet" locationNameList:"item" type:"list"`
// The components in the path from source to destination.
@@ -139904,6 +150041,7 @@ type NetworkInsightsAnalysis struct {
// The status message, if the status is failed.
StatusMessage *string `locationName:"statusMessage" type:"string"`
+ // Potential intermediate accounts.
SuggestedAccounts []*string `locationName:"suggestedAccountSet" locationNameList:"item" type:"list"`
// The tags.
@@ -140034,18 +150172,24 @@ type NetworkInsightsPath struct {
// The time stamp when the path was created.
CreatedDate *time.Time `locationName:"createdDate" type:"timestamp"`
- // The Amazon Web Services resource that is the destination of the path.
+ // The ID of the destination.
Destination *string `locationName:"destination" type:"string"`
+ // The Amazon Resource Name (ARN) of the destination.
DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"`
- // The IP address of the Amazon Web Services resource that is the destination
- // of the path.
+ // The IP address of the destination.
DestinationIp *string `locationName:"destinationIp" type:"string"`
// The destination port.
DestinationPort *int64 `locationName:"destinationPort" type:"integer"`
+ // Scopes the analysis to network paths that match specific filters at the destination.
+ FilterAtDestination *PathFilter `locationName:"filterAtDestination" type:"structure"`
+
+ // Scopes the analysis to network paths that match specific filters at the source.
+ FilterAtSource *PathFilter `locationName:"filterAtSource" type:"structure"`
+
// The Amazon Resource Name (ARN) of the path.
NetworkInsightsPathArn *string `locationName:"networkInsightsPathArn" min:"1" type:"string"`
@@ -140055,13 +150199,13 @@ type NetworkInsightsPath struct {
// The protocol.
Protocol *string `locationName:"protocol" type:"string" enum:"Protocol"`
- // The Amazon Web Services resource that is the source of the path.
+ // The ID of the source.
Source *string `locationName:"source" type:"string"`
+ // The Amazon Resource Name (ARN) of the source.
SourceArn *string `locationName:"sourceArn" min:"1" type:"string"`
- // The IP address of the Amazon Web Services resource that is the source of
- // the path.
+ // The IP address of the source.
SourceIp *string `locationName:"sourceIp" type:"string"`
// The tags associated with the path.
@@ -140116,6 +150260,18 @@ func (s *NetworkInsightsPath) SetDestinationPort(v int64) *NetworkInsightsPath {
return s
}
+// SetFilterAtDestination sets the FilterAtDestination field's value.
+func (s *NetworkInsightsPath) SetFilterAtDestination(v *PathFilter) *NetworkInsightsPath {
+ s.FilterAtDestination = v
+ return s
+}
+
+// SetFilterAtSource sets the FilterAtSource field's value.
+func (s *NetworkInsightsPath) SetFilterAtSource(v *PathFilter) *NetworkInsightsPath {
+ s.FilterAtSource = v
+ return s
+}
+
// SetNetworkInsightsPathArn sets the NetworkInsightsPathArn field's value.
func (s *NetworkInsightsPath) SetNetworkInsightsPathArn(v string) *NetworkInsightsPath {
s.NetworkInsightsPathArn = &v
@@ -140993,6 +151149,92 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration {
return s
}
+// Options for OIDC-based, user-identity type trust provider.
+type OidcOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The OIDC authorization endpoint.
+ AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"`
+
+ // The client identifier.
+ ClientId *string `locationName:"clientId" type:"string"`
+
+ // The client secret.
+ ClientSecret *string `locationName:"clientSecret" type:"string"`
+
+ // The OIDC issuer.
+ Issuer *string `locationName:"issuer" type:"string"`
+
+ // The OpenID Connect (OIDC) scope specified.
+ Scope *string `locationName:"scope" type:"string"`
+
+ // The OIDC token endpoint.
+ TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"`
+
+ // The OIDC user info endpoint.
+ UserInfoEndpoint *string `locationName:"userInfoEndpoint" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OidcOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OidcOptions) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value.
+func (s *OidcOptions) SetAuthorizationEndpoint(v string) *OidcOptions {
+ s.AuthorizationEndpoint = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *OidcOptions) SetClientId(v string) *OidcOptions {
+ s.ClientId = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *OidcOptions) SetClientSecret(v string) *OidcOptions {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *OidcOptions) SetIssuer(v string) *OidcOptions {
+ s.Issuer = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *OidcOptions) SetScope(v string) *OidcOptions {
+ s.Scope = &v
+ return s
+}
+
+// SetTokenEndpoint sets the TokenEndpoint field's value.
+func (s *OidcOptions) SetTokenEndpoint(v string) *OidcOptions {
+ s.TokenEndpoint = &v
+ return s
+}
+
+// SetUserInfoEndpoint sets the UserInfoEndpoint field's value.
+func (s *OidcOptions) SetUserInfoEndpoint(v string) *OidcOptions {
+ s.UserInfoEndpoint = &v
+ return s
+}
+
// Describes the configuration of On-Demand Instances in an EC2 Fleet.
type OnDemandOptions struct {
_ struct{} `type:"structure"`
@@ -141394,6 +151636,12 @@ type PathComponent struct {
// The explanation codes.
Explanations []*Explanation `locationName:"explanationSet" locationNameList:"item" type:"list"`
+ // The Network Firewall stateful rule.
+ FirewallStatefulRule *FirewallStatefulRule `locationName:"firewallStatefulRule" type:"structure"`
+
+ // The Network Firewall stateless rule.
+ FirewallStatelessRule *FirewallStatelessRule `locationName:"firewallStatelessRule" type:"structure"`
+
// The inbound header.
InboundHeader *AnalysisPacketHeader `locationName:"inboundHeader" type:"structure"`
@@ -141409,6 +151657,9 @@ type PathComponent struct {
// The sequence number.
SequenceNumber *int64 `locationName:"sequenceNumber" type:"integer"`
+ // The name of the VPC endpoint service.
+ ServiceName *string `locationName:"serviceName" type:"string"`
+
// The source VPC.
SourceVpc *AnalysisComponent `locationName:"sourceVpc" type:"structure"`
@@ -141485,6 +151736,18 @@ func (s *PathComponent) SetExplanations(v []*Explanation) *PathComponent {
return s
}
+// SetFirewallStatefulRule sets the FirewallStatefulRule field's value.
+func (s *PathComponent) SetFirewallStatefulRule(v *FirewallStatefulRule) *PathComponent {
+ s.FirewallStatefulRule = v
+ return s
+}
+
+// SetFirewallStatelessRule sets the FirewallStatelessRule field's value.
+func (s *PathComponent) SetFirewallStatelessRule(v *FirewallStatelessRule) *PathComponent {
+ s.FirewallStatelessRule = v
+ return s
+}
+
// SetInboundHeader sets the InboundHeader field's value.
func (s *PathComponent) SetInboundHeader(v *AnalysisPacketHeader) *PathComponent {
s.InboundHeader = v
@@ -141515,6 +151778,12 @@ func (s *PathComponent) SetSequenceNumber(v int64) *PathComponent {
return s
}
+// SetServiceName sets the ServiceName field's value.
+func (s *PathComponent) SetServiceName(v string) *PathComponent {
+ s.ServiceName = &v
+ return s
+}
+
// SetSourceVpc sets the SourceVpc field's value.
func (s *PathComponent) SetSourceVpc(v *AnalysisComponent) *PathComponent {
s.SourceVpc = v
@@ -141545,6 +151814,126 @@ func (s *PathComponent) SetVpc(v *AnalysisComponent) *PathComponent {
return s
}
+// Describes a set of filters for a path analysis. Use path filters to scope
+// the analysis when there can be multiple resulting paths.
+type PathFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The destination IPv4 address.
+ DestinationAddress *string `locationName:"destinationAddress" type:"string"`
+
+ // The destination port range.
+ DestinationPortRange *FilterPortRange `locationName:"destinationPortRange" type:"structure"`
+
+ // The source IPv4 address.
+ SourceAddress *string `locationName:"sourceAddress" type:"string"`
+
+ // The source port range.
+ SourcePortRange *FilterPortRange `locationName:"sourcePortRange" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PathFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PathFilter) GoString() string {
+ return s.String()
+}
+
+// SetDestinationAddress sets the DestinationAddress field's value.
+func (s *PathFilter) SetDestinationAddress(v string) *PathFilter {
+ s.DestinationAddress = &v
+ return s
+}
+
+// SetDestinationPortRange sets the DestinationPortRange field's value.
+func (s *PathFilter) SetDestinationPortRange(v *FilterPortRange) *PathFilter {
+ s.DestinationPortRange = v
+ return s
+}
+
+// SetSourceAddress sets the SourceAddress field's value.
+func (s *PathFilter) SetSourceAddress(v string) *PathFilter {
+ s.SourceAddress = &v
+ return s
+}
+
+// SetSourcePortRange sets the SourcePortRange field's value.
+func (s *PathFilter) SetSourcePortRange(v *FilterPortRange) *PathFilter {
+ s.SourcePortRange = v
+ return s
+}
+
+// Describes a set of filters for a path analysis. Use path filters to scope
+// the analysis when there can be multiple resulting paths.
+type PathRequestFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The destination IPv4 address.
+ DestinationAddress *string `type:"string"`
+
+ // The destination port range.
+ DestinationPortRange *RequestFilterPortRange `type:"structure"`
+
+ // The source IPv4 address.
+ SourceAddress *string `type:"string"`
+
+ // The source port range.
+ SourcePortRange *RequestFilterPortRange `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PathRequestFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PathRequestFilter) GoString() string {
+ return s.String()
+}
+
+// SetDestinationAddress sets the DestinationAddress field's value.
+func (s *PathRequestFilter) SetDestinationAddress(v string) *PathRequestFilter {
+ s.DestinationAddress = &v
+ return s
+}
+
+// SetDestinationPortRange sets the DestinationPortRange field's value.
+func (s *PathRequestFilter) SetDestinationPortRange(v *RequestFilterPortRange) *PathRequestFilter {
+ s.DestinationPortRange = v
+ return s
+}
+
+// SetSourceAddress sets the SourceAddress field's value.
+func (s *PathRequestFilter) SetSourceAddress(v string) *PathRequestFilter {
+ s.SourceAddress = &v
+ return s
+}
+
+// SetSourcePortRange sets the SourcePortRange field's value.
+func (s *PathRequestFilter) SetSourcePortRange(v *RequestFilterPortRange) *PathRequestFilter {
+ s.SourcePortRange = v
+ return s
+}
+
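+// Illustrative sketch (not part of the generated code): a request-side path
+// filter pairs addresses with RequestFilterPortRange values (defined later in
+// this file) to scope a Reachability Analyzer path when several paths could
+// match. The addresses and port below are made-up placeholders.
+//
+//	filter := &PathRequestFilter{}
+//	filter.SetSourceAddress("10.0.1.0").
+//		SetDestinationAddress("10.0.2.10").
+//		SetDestinationPortRange((&RequestFilterPortRange{}).SetFromPort(443).SetToPort(443))
+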
// Describes a path statement.
type PathStatement struct {
_ struct{} `type:"structure"`
@@ -143596,7 +153985,9 @@ func (s *ProvisionByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *ProvisionByoipCid
type ProvisionIpamPoolCidrInput struct {
_ struct{} `type:"structure"`
- // The CIDR you want to assign to the IPAM pool.
+ // The CIDR you want to assign to the IPAM pool. Either "NetmaskLength" or "Cidr"
+ // is required. This value will be null if you specify "NetmaskLength" and will
+ // be filled in during the provisioning process.
Cidr *string `type:"string"`
// A signed document that proves that you are authorized to bring a specified
@@ -143604,6 +153995,10 @@ type ProvisionIpamPoolCidrInput struct {
// only.
CidrAuthorizationContext *IpamCidrAuthorizationContext `type:"structure"`
+ // A unique, case-sensitive identifier that you provide to ensure the idempotency
+ // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
// A check for whether you have the required permissions for the action without
// actually making the request and provides an error response. If you have the
// required permissions, the error response is DryRunOperation. Otherwise, it
@@ -143614,6 +154009,12 @@ type ProvisionIpamPoolCidrInput struct {
//
// IpamPoolId is a required field
IpamPoolId *string `type:"string" required:"true"`
+
+ // The netmask length of the CIDR you'd like to provision to a pool. Can be
+ // used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for
+ // provisioning CIDRs to pools with source pools. Cannot be used to provision
+ // BYOIP CIDRs to top-level pools. Either "NetmaskLength" or "Cidr" is required.
+ NetmaskLength *int64 `type:"integer"`
}
// String returns the string representation.
@@ -143659,6 +154060,12 @@ func (s *ProvisionIpamPoolCidrInput) SetCidrAuthorizationContext(v *IpamCidrAuth
return s
}
+// SetClientToken sets the ClientToken field's value.
+func (s *ProvisionIpamPoolCidrInput) SetClientToken(v string) *ProvisionIpamPoolCidrInput {
+ s.ClientToken = &v
+ return s
+}
+
// SetDryRun sets the DryRun field's value.
func (s *ProvisionIpamPoolCidrInput) SetDryRun(v bool) *ProvisionIpamPoolCidrInput {
s.DryRun = &v
@@ -143671,6 +154078,12 @@ func (s *ProvisionIpamPoolCidrInput) SetIpamPoolId(v string) *ProvisionIpamPoolC
return s
}
+// SetNetmaskLength sets the NetmaskLength field's value.
+func (s *ProvisionIpamPoolCidrInput) SetNetmaskLength(v int64) *ProvisionIpamPoolCidrInput {
+ s.NetmaskLength = &v
+ return s
+}
+
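+// Illustrative sketch (not part of the generated code): per the field
+// documentation above, a provisioning request supplies either Cidr or
+// NetmaskLength, not both. The pool ID is a made-up placeholder.
+//
+//	// Provision an explicit CIDR.
+//	byCidr := &ProvisionIpamPoolCidrInput{}
+//	byCidr.SetIpamPoolId("ipam-pool-0123456789abcdef0").SetCidr("10.0.0.0/16")
+//
+//	// Or let IPAM choose a CIDR of the requested size.
+//	bySize := &ProvisionIpamPoolCidrInput{}
+//	bySize.SetIpamPoolId("ipam-pool-0123456789abcdef0").SetNetmaskLength(20)
+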
type ProvisionIpamPoolCidrOutput struct {
_ struct{} `type:"structure"`
@@ -144930,8 +155343,15 @@ type RegisterImageInput struct {
Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
// The billing product codes. Your account must be authorized to specify billing
- // product codes. Otherwise, you can use the Amazon Web Services Marketplace
- // to bill for the use of an AMI.
+ // product codes.
+ //
+ // If your account is not authorized to specify billing product codes, you can
+ // publish AMIs that include billable software and list them on the Amazon Web
+ // Services Marketplace. You must first register as a seller on the Amazon Web
+ // Services Marketplace. For more information, see Getting started as a seller
+ // (https://docs.aws.amazon.com/marketplace/latest/userguide/user-guide-for-sellers.html)
+ // and AMI-based products (https://docs.aws.amazon.com/marketplace/latest/userguide/ami-products.html)
+ // in the Amazon Web Services Marketplace Seller Guide.
BillingProducts []*string `locationName:"BillingProduct" locationNameList:"item" type:"list"`
// The block device mapping entries.
@@ -144943,11 +155363,17 @@ type RegisterImageInput struct {
// the same Outpost or in the Region of that Outpost. AMIs on an Outpost that
// include local snapshots can be used to launch instances on the same Outpost
// only. For more information, Amazon EBS local snapshots on Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
- // The boot mode of the AMI. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // The boot mode of the AMI. A value of uefi-preferred indicates that the AMI
+ // supports both UEFI and Legacy BIOS.
+ //
+ // The operating system contained in the AMI must be configured to support the
+ // specified boot mode.
+ //
+ // For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)
+ // in the Amazon EC2 User Guide.
BootMode *string `type:"string" enum:"BootModeValues"`
// A description for your AMI.
@@ -144977,7 +155403,7 @@ type RegisterImageInput struct {
// by default, the instance requires that IMDSv2 is used when requesting instance
// metadata. In addition, HttpPutResponseHopLimit is set to 2. For more information,
// see Configure the AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html#configure-IMDS-new-instances-ami-configuration)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
//
// If you set the value to v2.0, make sure that your AMI software can support
// IMDSv2.
@@ -145013,7 +155439,7 @@ type RegisterImageInput struct {
// Set to v2.0 to enable Trusted Platform Module (TPM) support. For more information,
// see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
TpmSupport *string `type:"string" enum:"TpmSupportValues"`
// Base64 representation of the non-volatile UEFI variable store. To retrieve
@@ -145021,7 +155447,7 @@ type RegisterImageInput struct {
// command. You can inspect and modify the UEFI data by using the python-uefivars
// tool (https://github.com/awslabs/python-uefivars) on GitHub. For more information,
// see UEFI Secure Boot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html)
- // in the Amazon Elastic Compute Cloud User Guide.
+ // in the Amazon EC2 User Guide.
UefiData *string `type:"string"`
// The type of virtualization (hvm | paravirtual).
@@ -145328,10 +155754,14 @@ type RegisterTransitGatewayMulticastGroupMembersInput struct {
// The group members' network interface IDs to register with the transit gateway
// multicast group.
- NetworkInterfaceIds []*string `locationNameList:"item" type:"list"`
+ //
+ // NetworkInterfaceIds is a required field
+ NetworkInterfaceIds []*string `locationNameList:"item" type:"list" required:"true"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -145352,6 +155782,22 @@ func (s RegisterTransitGatewayMulticastGroupMembersInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterTransitGatewayMulticastGroupMembersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterTransitGatewayMulticastGroupMembersInput"}
+ if s.NetworkInterfaceIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceIds"))
+ }
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetDryRun sets the DryRun field's value.
func (s *RegisterTransitGatewayMulticastGroupMembersInput) SetDryRun(v bool) *RegisterTransitGatewayMulticastGroupMembersInput {
s.DryRun = &v
@@ -145421,10 +155867,14 @@ type RegisterTransitGatewayMulticastGroupSourcesInput struct {
// The group sources' network interface IDs to register with the transit gateway
// multicast group.
- NetworkInterfaceIds []*string `locationNameList:"item" type:"list"`
+ //
+ // NetworkInterfaceIds is a required field
+ NetworkInterfaceIds []*string `locationNameList:"item" type:"list" required:"true"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -145445,6 +155895,22 @@ func (s RegisterTransitGatewayMulticastGroupSourcesInput) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterTransitGatewayMulticastGroupSourcesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterTransitGatewayMulticastGroupSourcesInput"}
+ if s.NetworkInterfaceIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceIds"))
+ }
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetDryRun sets the DryRun field's value.
func (s *RegisterTransitGatewayMulticastGroupSourcesInput) SetDryRun(v bool) *RegisterTransitGatewayMulticastGroupSourcesInput {
s.DryRun = &v
@@ -145784,7 +156250,7 @@ type RejectVpcEndpointConnectionsInput struct {
// ServiceId is a required field
ServiceId *string `type:"string" required:"true"`
- // The IDs of one or more VPC endpoints.
+ // The IDs of the VPC endpoints.
//
// VpcEndpointIds is a required field
VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"`
@@ -147293,6 +157759,118 @@ func (s *ReplaceTransitGatewayRouteOutput) SetRoute(v *TransitGatewayRoute) *Rep
return s
}
+type ReplaceVpnTunnelInput struct {
+ _ struct{} `type:"structure"`
+
+ // Trigger pending tunnel endpoint maintenance.
+ ApplyPendingMaintenance *bool `type:"boolean"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the Site-to-Site VPN connection.
+ //
+ // VpnConnectionId is a required field
+ VpnConnectionId *string `type:"string" required:"true"`
+
+ // The external IP address of the VPN tunnel.
+ //
+ // VpnTunnelOutsideIpAddress is a required field
+ VpnTunnelOutsideIpAddress *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplaceVpnTunnelInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplaceVpnTunnelInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceVpnTunnelInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplaceVpnTunnelInput"}
+ if s.VpnConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpnConnectionId"))
+ }
+ if s.VpnTunnelOutsideIpAddress == nil {
+ invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetApplyPendingMaintenance sets the ApplyPendingMaintenance field's value.
+func (s *ReplaceVpnTunnelInput) SetApplyPendingMaintenance(v bool) *ReplaceVpnTunnelInput {
+ s.ApplyPendingMaintenance = &v
+ return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ReplaceVpnTunnelInput) SetDryRun(v bool) *ReplaceVpnTunnelInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetVpnConnectionId sets the VpnConnectionId field's value.
+func (s *ReplaceVpnTunnelInput) SetVpnConnectionId(v string) *ReplaceVpnTunnelInput {
+ s.VpnConnectionId = &v
+ return s
+}
+
+// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value.
+func (s *ReplaceVpnTunnelInput) SetVpnTunnelOutsideIpAddress(v string) *ReplaceVpnTunnelInput {
+ s.VpnTunnelOutsideIpAddress = &v
+ return s
+}
+
+type ReplaceVpnTunnelOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Confirmation of replace tunnel operation.
+ Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplaceVpnTunnelOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplaceVpnTunnelOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *ReplaceVpnTunnelOutput) SetReturn(v bool) *ReplaceVpnTunnelOutput {
+ s.Return = &v
+ return s
+}
+
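+// Illustrative sketch (not part of the generated code): both VpnConnectionId
+// and VpnTunnelOutsideIpAddress are required, so Validate rejects an input
+// that omits either one. The IDs below are made-up placeholders.
+//
+//	input := &ReplaceVpnTunnelInput{}
+//	input.SetVpnConnectionId("vpn-0123456789abcdef0").
+//		SetVpnTunnelOutsideIpAddress("203.0.113.17").
+//		SetApplyPendingMaintenance(true)
+//	if err := input.Validate(); err != nil {
+//		// handle missing required parameters
+//	}
+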
type ReportInstanceStatusInput struct {
_ struct{} `type:"structure"`
@@ -147449,6 +158027,47 @@ func (s ReportInstanceStatusOutput) GoString() string {
return s.String()
}
+// Describes a port range.
+type RequestFilterPortRange struct {
+ _ struct{} `type:"structure"`
+
+ // The first port in the range.
+ FromPort *int64 `type:"integer"`
+
+ // The last port in the range.
+ ToPort *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RequestFilterPortRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RequestFilterPortRange) GoString() string {
+ return s.String()
+}
+
+// SetFromPort sets the FromPort field's value.
+func (s *RequestFilterPortRange) SetFromPort(v int64) *RequestFilterPortRange {
+ s.FromPort = &v
+ return s
+}
+
+// SetToPort sets the ToPort field's value.
+func (s *RequestFilterPortRange) SetToPort(v int64) *RequestFilterPortRange {
+ s.ToPort = &v
+ return s
+}
+
// A tag on an IPAM resource.
type RequestIpamResourceTag struct {
_ struct{} `type:"structure"`
@@ -147515,7 +158134,8 @@ type RequestLaunchTemplateData struct {
CreditSpecification *CreditSpecificationRequest `type:"structure"`
// Indicates whether to enable the instance for stop protection. For more information,
- // see Stop Protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection).
+ // see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection)
+ // in the Amazon Elastic Compute Cloud User Guide.
DisableApiStop *bool `type:"boolean"`
// If you set this parameter to true, you can't terminate the instance using
@@ -147555,7 +158175,22 @@ type RequestLaunchTemplateData struct {
// The name or Amazon Resource Name (ARN) of an IAM instance profile.
IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"`
- // The ID of the AMI.
+ // The ID of the AMI. Alternatively, you can specify a Systems Manager parameter,
+ // which will resolve to an AMI ID on launch.
+ //
+ // Valid formats:
+ //
+ // * ami-17characters00000
+ //
+ // * resolve:ssm:parameter-name
+ //
+ // * resolve:ssm:parameter-name:version-number
+ //
+ // * resolve:ssm:parameter-name:label
+ //
+ // For more information, see Use a Systems Manager parameter to find an AMI
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI)
+ // in the Amazon Elastic Compute Cloud User Guide.
ImageId *string `type:"string"`
// Indicates whether an instance stops or terminates when you initiate shutdown
@@ -148315,9 +158950,13 @@ type RequestSpotLaunchSpecification struct {
// The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
- // The Base64-encoded user data for the instance. User data is limited to 16
- // KB.
- UserData *string `locationName:"userData" type:"string"`
+ // The base64-encoded user data that instances use when starting up. User data
+ // is limited to 16 KB.
+ //
+ // UserData is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by RequestSpotLaunchSpecification's
+ // String and GoString methods.
+ UserData *string `locationName:"userData" type:"string" sensitive:"true"`
}
// String returns the string representation.
@@ -150281,7 +160920,8 @@ type ResponseLaunchTemplateData struct {
CreditSpecification *CreditSpecification `locationName:"creditSpecification" type:"structure"`
// Indicates whether the instance is enabled for stop protection. For more information,
- // see Stop Protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection).
+ // see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection)
+ // in the Amazon Elastic Compute Cloud User Guide.
DisableApiStop *bool `locationName:"disableApiStop" type:"boolean"`
// If set to true, indicates that the instance cannot be terminated using the
@@ -150308,7 +160948,24 @@ type ResponseLaunchTemplateData struct {
// The IAM instance profile.
IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
- // The ID of the AMI that was used to launch the instance.
+ // The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter
+ // will resolve to the ID of the AMI at instance launch.
+ //
+ // The value depends on what you specified in the request. The possible values
+ // are:
+ //
+ // * If an AMI ID was specified in the request, then this is the AMI ID.
+ //
+ // * If a Systems Manager parameter was specified in the request, and ResolveAlias
+ // was configured as true, then this is the AMI ID that the parameter is
+ // mapped to in the Parameter Store.
+ //
+ // * If a Systems Manager parameter was specified in the request, and ResolveAlias
+ // was configured as false, then this is the parameter value.
+ //
+ // For more information, see Use a Systems Manager parameter instead of an AMI
+ // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id)
+ // in the Amazon Elastic Compute Cloud User Guide.
ImageId *string `locationName:"imageId" type:"string"`
// Indicates whether an instance stops or terminates when you initiate shutdown
@@ -150370,7 +161027,11 @@ type ResponseLaunchTemplateData struct {
TagSpecifications []*LaunchTemplateTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"`
// The user data for the instance.
- UserData *string `locationName:"userData" type:"string"`
+ //
+ // UserData is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by ResponseLaunchTemplateData's
+ // String and GoString methods.
+ UserData *string `locationName:"userData" type:"string" sensitive:"true"`
}
// String returns the string representation.
@@ -151506,8 +162167,9 @@ type RevokeSecurityGroupIngressInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
- // The start of port range for the TCP and UDP protocols, or an ICMP type number.
- // For the ICMP type number, use -1 to specify all ICMP types.
+ // If the protocol is TCP or UDP, this is the start of the port range. If the
+ // protocol is ICMP, this is the type number. A value of -1 indicates all ICMP
+ // types.
FromPort *int64 `type:"integer"`
// The ID of the security group. You must specify either the security group
@@ -151548,8 +162210,8 @@ type RevokeSecurityGroupIngressInput struct {
// range, use a set of IP permissions instead.
SourceSecurityGroupOwnerId *string `type:"string"`
- // The end of port range for the TCP and UDP protocols, or an ICMP code number.
- // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
+ // If the protocol is TCP or UDP, this is the end of the port range. If the
+ // protocol is ICMP, this is the code. A value of -1 indicates all ICMP codes.
ToPort *int64 `type:"integer"`
}
@@ -152059,6 +162721,129 @@ func (s *RouteTableAssociationState) SetStatusMessage(v string) *RouteTableAssoc
return s
}
+// Describes the rule options for a stateful rule group.
+type RuleGroupRuleOptionsPair struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the rule group.
+ RuleGroupArn *string `locationName:"ruleGroupArn" min:"1" type:"string"`
+
+ // The rule options.
+ RuleOptions []*RuleOption `locationName:"ruleOptionSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleGroupRuleOptionsPair) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleGroupRuleOptionsPair) GoString() string {
+ return s.String()
+}
+
+// SetRuleGroupArn sets the RuleGroupArn field's value.
+func (s *RuleGroupRuleOptionsPair) SetRuleGroupArn(v string) *RuleGroupRuleOptionsPair {
+ s.RuleGroupArn = &v
+ return s
+}
+
+// SetRuleOptions sets the RuleOptions field's value.
+func (s *RuleGroupRuleOptionsPair) SetRuleOptions(v []*RuleOption) *RuleGroupRuleOptionsPair {
+ s.RuleOptions = v
+ return s
+}
+
+// Describes the type of a stateful rule group.
+type RuleGroupTypePair struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the rule group.
+ RuleGroupArn *string `locationName:"ruleGroupArn" min:"1" type:"string"`
+
+ // The rule group type. The possible values are Domain List and Suricata.
+ RuleGroupType *string `locationName:"ruleGroupType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleGroupTypePair) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleGroupTypePair) GoString() string {
+ return s.String()
+}
+
+// SetRuleGroupArn sets the RuleGroupArn field's value.
+func (s *RuleGroupTypePair) SetRuleGroupArn(v string) *RuleGroupTypePair {
+ s.RuleGroupArn = &v
+ return s
+}
+
+// SetRuleGroupType sets the RuleGroupType field's value.
+func (s *RuleGroupTypePair) SetRuleGroupType(v string) *RuleGroupTypePair {
+ s.RuleGroupType = &v
+ return s
+}
+
+// Describes additional settings for a stateful rule.
+type RuleOption struct {
+ _ struct{} `type:"structure"`
+
+ // The Suricata keyword.
+ Keyword *string `locationName:"keyword" type:"string"`
+
+ // The settings for the keyword.
+ Settings []*string `locationName:"settingSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleOption) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleOption) GoString() string {
+ return s.String()
+}
+
+// SetKeyword sets the Keyword field's value.
+func (s *RuleOption) SetKeyword(v string) *RuleOption {
+ s.Keyword = &v
+ return s
+}
+
+// SetSettings sets the Settings field's value.
+func (s *RuleOption) SetSettings(v []*string) *RuleOption {
+ s.Settings = v
+ return s
+}
+
type RunInstancesInput struct {
_ struct{} `type:"structure"`
@@ -152141,6 +162926,14 @@ type RunInstancesInput struct {
// to accelerate your Deep Learning (DL) inference workloads.
//
// You cannot specify accelerators from different generations in the same request.
+ //
+ // Starting April 15, 2023, Amazon Web Services will not onboard new customers
+ // to Amazon Elastic Inference (EI), and will help current customers migrate
+ // their workloads to options that offer better price and performance. After
+ // April 15, 2023, new customers will not be able to launch instances with Amazon
+ // EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However,
+ // customers who have used Amazon EI at least once during the past 30-day period
+ // are considered current customers and will be able to continue using the service.
ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"`
// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.
@@ -152300,8 +163093,7 @@ type RunInstancesInput struct {
// as part of the network interface.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
- // [EC2-Classic, default VPC] The names of the security groups. For a nondefault
- // VPC, you must use security group IDs instead.
+ // [EC2-Classic, default VPC] The names of the security groups.
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface.
@@ -152719,8 +163511,12 @@ type RunScheduledInstancesInput struct {
// The launch specification. You must match the instance type, Availability
// Zone, network, and platform of the schedule that you purchased.
//
+ // LaunchSpecification is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by RunScheduledInstancesInput's
+ // String and GoString methods.
+ //
// LaunchSpecification is a required field
- LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"`
+ LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true" sensitive:"true"`
// The Scheduled Instance ID.
//
@@ -152884,8 +163680,9 @@ type S3Storage struct {
_ struct{} `type:"structure"`
// The access key ID of the owner of the bucket. Before you specify a value
- // for your access key ID, review and follow the guidance in Best practices
- // for managing Amazon Web Services access keys (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html).
+ // for your access key ID, review and follow the guidance in Best Practices
+ // for Amazon Web Services accounts (https://docs.aws.amazon.com/accounts/latest/reference/best-practices.html)
+ // in the Account Management Reference Guide.
AWSAccessKeyId *string `type:"string"`
// The bucket in which to store the AMI. You can specify a bucket that you already
@@ -153641,7 +164438,7 @@ func (s *ScheduledInstancesIpv6Address) SetIpv6Address(v string) *ScheduledInsta
// the ID of the subnet. You can specify the subnet using either SubnetId or
// NetworkInterface.
type ScheduledInstancesLaunchSpecification struct {
- _ struct{} `type:"structure"`
+ _ struct{} `type:"structure" sensitive:"true"`
// The block device mapping entries.
BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
@@ -154071,6 +164868,8 @@ type SearchLocalGatewayRoutesInput struct {
// One or more filters.
//
+ // * prefix-list-id - The ID of the prefix list.
+ //
// * route-search.exact-match - The exact match of the specified filter.
//
// * route-search.longest-prefix-match - The longest prefix that matches
@@ -154245,7 +165044,9 @@ type SearchTransitGatewayMulticastGroupsInput struct {
NextToken *string `type:"string"`
// The ID of the transit gateway multicast domain.
- TransitGatewayMulticastDomainId *string `type:"string"`
+ //
+ // TransitGatewayMulticastDomainId is a required field
+ TransitGatewayMulticastDomainId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -154272,6 +165073,9 @@ func (s *SearchTransitGatewayMulticastGroupsInput) Validate() error {
if s.MaxResults != nil && *s.MaxResults < 5 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
}
+ if s.TransitGatewayMulticastDomainId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId"))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -154700,9 +165504,10 @@ type SecurityGroupRule struct {
// The security group rule description.
Description *string `locationName:"description" type:"string"`
- // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6
- // type. A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6
- // types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the start of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the type number. A value of -1 indicates
+ // all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
FromPort *int64 `locationName:"fromPort" type:"integer"`
// The ID of the security group.
@@ -154732,9 +165537,10 @@ type SecurityGroupRule struct {
// The tags applied to the security group rule.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
- // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
- // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6
- // types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the end of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the code. A value of -1 indicates all
+ // ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
ToPort *int64 `locationName:"toPort" type:"integer"`
}
@@ -154908,9 +165714,10 @@ type SecurityGroupRuleRequest struct {
// The description of the security group rule.
Description *string `type:"string"`
- // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6
- // type. A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6
- // types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the start of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the type number. A value of -1 indicates
+ // all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
FromPort *int64 `type:"integer"`
// The IP protocol name (tcp, udp, icmp, icmpv6) or number (see Protocol Numbers
@@ -154925,9 +165732,10 @@ type SecurityGroupRuleRequest struct {
// The ID of the security group that is referenced in the security group rule.
ReferencedGroupId *string `type:"string"`
- // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
- // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6
- // types, you must specify all codes.
+ // If the protocol is TCP or UDP, this is the end of the port range. If the
+ // protocol is ICMP or ICMPv6, this is the code. A value of -1 indicates all
+ // ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify
+ // all ICMP/ICMPv6 codes.
ToPort *int64 `type:"integer"`
}
@@ -155005,7 +165813,9 @@ type SecurityGroupRuleUpdate struct {
SecurityGroupRule *SecurityGroupRuleRequest `type:"structure"`
// The ID of the security group rule.
- SecurityGroupRuleId *string `type:"string"`
+ //
+ // SecurityGroupRuleId is a required field
+ SecurityGroupRuleId *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -155026,6 +165836,19 @@ func (s SecurityGroupRuleUpdate) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SecurityGroupRuleUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SecurityGroupRuleUpdate"}
+ if s.SecurityGroupRuleId == nil {
+ invalidParams.Add(request.NewErrParamRequired("SecurityGroupRuleId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
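+// Illustrative sketch (not part of the generated code): SecurityGroupRuleId
+// is now a required field, so an update entry is built around the ID of the
+// existing rule and validated before use. The rule ID is a made-up
+// placeholder.
+//
+//	update := &SecurityGroupRuleUpdate{}
+//	update.SetSecurityGroupRuleId("sgr-0123456789abcdef0").
+//		SetSecurityGroupRule((&SecurityGroupRuleRequest{}).SetDescription("allow https"))
+//	if err := update.Validate(); err != nil {
+//		// a nil SecurityGroupRuleId is reported here
+//	}
+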
// SetSecurityGroupRule sets the SecurityGroupRule field's value.
func (s *SecurityGroupRuleUpdate) SetSecurityGroupRule(v *SecurityGroupRuleRequest) *SecurityGroupRuleUpdate {
s.SecurityGroupRule = v
@@ -155166,7 +165989,7 @@ type ServiceConfiguration struct {
// The supported IP address types.
SupportedIpAddressTypes []*string `locationName:"supportedIpAddressTypeSet" locationNameList:"item" type:"list" enum:"ServiceConnectivityType"`
- // Any tags assigned to the service.
+ // The tags assigned to the service.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}
@@ -155326,7 +166149,7 @@ type ServiceDetail struct {
// The supported IP address types.
SupportedIpAddressTypes []*string `locationName:"supportedIpAddressTypeSet" locationNameList:"item" type:"list" enum:"ServiceConnectivityType"`
- // Any tags assigned to the service.
+ // The tags assigned to the service.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
// Indicates whether the service supports endpoint policies.
@@ -156622,8 +167445,13 @@ type SpotFleetLaunchSpecification struct {
// The tags to apply during creation.
TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"`
- // The Base64-encoded user data that instances use when starting up.
- UserData *string `locationName:"userData" type:"string"`
+ // The base64-encoded user data that instances use when starting up. User data
+ // is limited to 16 KB.
+ //
+ // UserData is a sensitive parameter and its value will be
+ // replaced with "sensitive" in the string returned by SpotFleetLaunchSpecification's
+ // String and GoString methods.
+ UserData *string `locationName:"userData" type:"string" sensitive:"true"`
// The number of units provided by the specified instance type. These are the
// same units that you chose to set the target capacity in terms of instances,
@@ -156941,9 +167769,11 @@ type SpotFleetRequestConfigData struct {
// Reserved.
Context *string `locationName:"context" type:"string"`
- // Indicates whether running Spot Instances should be terminated if you decrease
+ // Indicates whether running instances should be terminated if you decrease
// the target capacity of the Spot Fleet request below the current size of the
// Spot Fleet.
+ //
+ // Supported only for fleets of type maintain.
ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
// The number of units fulfilled by this request compared to the set target
@@ -157073,7 +167903,8 @@ type SpotFleetRequestConfigData struct {
// TargetCapacity is a required field
TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"`
- // The unit for the target capacity.
+ // The unit for the target capacity. TargetCapacityUnitType can only be specified
+ // when InstanceRequirements is specified.
//
// Default: units (translates to number of instances)
TargetCapacityUnitType *string `locationName:"targetCapacityUnitType" type:"string" enum:"TargetCapacityUnitType"`
@@ -158684,6 +169515,7 @@ func (s *StartNetworkInsightsAccessScopeAnalysisOutput) SetNetworkInsightsAccess
type StartNetworkInsightsAnalysisInput struct {
_ struct{} `type:"structure"`
+ // The member accounts that contain resources that the path can traverse.
AdditionalAccounts []*string `locationName:"AdditionalAccount" locationNameList:"item" type:"list"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
@@ -159931,7 +170763,9 @@ func (s *TagDescription) SetValue(v string) *TagDescription {
return s
}
-// The tags to apply to a resource when the resource is being created.
+// The tags to apply to a resource when the resource is being created. When
+// you specify a tag, you must specify the resource type to tag; otherwise, the
+// request will fail.
//
// The Valid Values lists all the resource types that can be tagged. However,
// the action you're using might not support tagging all of these resource types.
@@ -160006,7 +170840,8 @@ type TargetCapacitySpecification struct {
// for On-Demand units, you cannot specify a target capacity for Spot units.
SpotTargetCapacity *int64 `locationName:"spotTargetCapacity" type:"integer"`
- // The unit for the target capacity.
+ // The unit for the target capacity. TargetCapacityUnitType can only be specified
+ // when InstanceRequirements is specified.
//
// Default: units (translates to number of instances)
TargetCapacityUnitType *string `locationName:"targetCapacityUnitType" type:"string" enum:"TargetCapacityUnitType"`
@@ -160090,7 +170925,8 @@ type TargetCapacitySpecificationRequest struct {
// The number of Spot units to request.
SpotTargetCapacity *int64 `type:"integer"`
- // The unit for the target capacity.
+ // The unit for the target capacity. TargetCapacityUnitType can only be specified
+ // when InstanceRequirements is specified.
//
// Default: units (translates to number of instances)
TargetCapacityUnitType *string `type:"string" enum:"TargetCapacityUnitType"`
@@ -164399,6 +175235,9 @@ type TunnelOption struct {
// The number of seconds after which a DPD timeout occurs.
DpdTimeoutSeconds *int64 `locationName:"dpdTimeoutSeconds" type:"integer"`
+ // The status of the tunnel endpoint lifecycle control feature.
+ EnableTunnelLifecycleControl *bool `locationName:"enableTunnelLifecycleControl" type:"boolean"`
+
// The IKE versions that are permitted for the VPN tunnel.
IkeVersions []*IKEVersionsListValue `locationName:"ikeVersionSet" locationNameList:"item" type:"list"`
@@ -164490,6 +175329,12 @@ func (s *TunnelOption) SetDpdTimeoutSeconds(v int64) *TunnelOption {
return s
}
+// SetEnableTunnelLifecycleControl sets the EnableTunnelLifecycleControl field's value.
+func (s *TunnelOption) SetEnableTunnelLifecycleControl(v bool) *TunnelOption {
+ s.EnableTunnelLifecycleControl = &v
+ return s
+}
+
// SetIkeVersions sets the IkeVersions field's value.
func (s *TunnelOption) SetIkeVersions(v []*IKEVersionsListValue) *TunnelOption {
s.IkeVersions = v
@@ -164799,6 +175644,132 @@ func (s UnassignPrivateIpAddressesOutput) GoString() string {
return s.String()
}
+type UnassignPrivateNatGatewayAddressInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The maximum amount of time to wait (in seconds) before forcibly releasing
+ // the IP addresses if connections are still in progress. Default value is 350
+ // seconds.
+ MaxDrainDurationSeconds *int64 `min:"1" type:"integer"`
+
+ // The NAT gateway ID.
+ //
+ // NatGatewayId is a required field
+ NatGatewayId *string `type:"string" required:"true"`
+
+ // The private IPv4 addresses you want to unassign.
+ //
+ // PrivateIpAddresses is a required field
+ PrivateIpAddresses []*string `locationName:"PrivateIpAddress" locationNameList:"item" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnassignPrivateNatGatewayAddressInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnassignPrivateNatGatewayAddressInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UnassignPrivateNatGatewayAddressInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UnassignPrivateNatGatewayAddressInput"}
+ if s.MaxDrainDurationSeconds != nil && *s.MaxDrainDurationSeconds < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxDrainDurationSeconds", 1))
+ }
+ if s.NatGatewayId == nil {
+ invalidParams.Add(request.NewErrParamRequired("NatGatewayId"))
+ }
+ if s.PrivateIpAddresses == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrivateIpAddresses"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *UnassignPrivateNatGatewayAddressInput) SetDryRun(v bool) *UnassignPrivateNatGatewayAddressInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetMaxDrainDurationSeconds sets the MaxDrainDurationSeconds field's value.
+func (s *UnassignPrivateNatGatewayAddressInput) SetMaxDrainDurationSeconds(v int64) *UnassignPrivateNatGatewayAddressInput {
+ s.MaxDrainDurationSeconds = &v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *UnassignPrivateNatGatewayAddressInput) SetNatGatewayId(v string) *UnassignPrivateNatGatewayAddressInput {
+ s.NatGatewayId = &v
+ return s
+}
+
+// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
+func (s *UnassignPrivateNatGatewayAddressInput) SetPrivateIpAddresses(v []*string) *UnassignPrivateNatGatewayAddressInput {
+ s.PrivateIpAddresses = v
+ return s
+}
+
+type UnassignPrivateNatGatewayAddressOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the NAT gateway IP addresses.
+ NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
+
+ // The NAT gateway ID.
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnassignPrivateNatGatewayAddressOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnassignPrivateNatGatewayAddressOutput) GoString() string {
+ return s.String()
+}
+
+// SetNatGatewayAddresses sets the NatGatewayAddresses field's value.
+func (s *UnassignPrivateNatGatewayAddressOutput) SetNatGatewayAddresses(v []*NatGatewayAddress) *UnassignPrivateNatGatewayAddressOutput {
+ s.NatGatewayAddresses = v
+ return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+func (s *UnassignPrivateNatGatewayAddressOutput) SetNatGatewayId(v string) *UnassignPrivateNatGatewayAddressOutput {
+ s.NatGatewayId = &v
+ return s
+}
+
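+// Editor's sketch (illustrative only, not generated SDK code): builds an
+// UnassignPrivateNatGatewayAddressInput with the generated setters and runs
+// the same client-side validation the SDK applies before sending the request.
+// The NAT gateway ID and address are the editor's own placeholders.
+func exampleUnassignPrivateNatGatewayAddress() error {
+ addr := "10.0.1.5"
+ input := (&UnassignPrivateNatGatewayAddressInput{}).
+ SetNatGatewayId("nat-0123456789abcdef0").
+ SetPrivateIpAddresses([]*string{&addr}).
+ SetMaxDrainDurationSeconds(60)
+ // NatGatewayId and PrivateIpAddresses are required, and
+ // MaxDrainDurationSeconds must be at least 1.
+ return input.Validate()
+}
+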
type UnmonitorInstancesInput struct {
_ struct{} `type:"structure"`
@@ -165739,6 +176710,1282 @@ func (s *ValidationWarning) SetErrors(v []*ValidationError) *ValidationWarning {
return s
}
+// An Amazon Web Services Verified Access endpoint specifies the application
+// that Amazon Web Services Verified Access provides access to. It must be attached
+// to an Amazon Web Services Verified Access group. An Amazon Web Services Verified
+// Access endpoint must also have an attached access policy before you attach
+// it to a group.
+type VerifiedAccessEndpoint struct {
+ _ struct{} `type:"structure"`
+
+ // The DNS name for users to reach your application.
+ ApplicationDomain *string `locationName:"applicationDomain" type:"string"`
+
+ // The type of attachment used to provide connectivity between the Amazon Web
+ // Services Verified Access endpoint and the application.
+ AttachmentType *string `locationName:"attachmentType" type:"string" enum:"VerifiedAccessEndpointAttachmentType"`
+
+ // The creation time.
+ CreationTime *string `locationName:"creationTime" type:"string"`
+
+ // The deletion time.
+ DeletionTime *string `locationName:"deletionTime" type:"string"`
+
+ // A description for the Amazon Web Services Verified Access endpoint.
+ Description *string `locationName:"description" type:"string"`
+
+ // Returned if the endpoint has a device trust provider attached.
+ DeviceValidationDomain *string `locationName:"deviceValidationDomain" type:"string"`
+
+ // The ARN of a public TLS/SSL certificate imported into or created with ACM.
+ DomainCertificateArn *string `locationName:"domainCertificateArn" type:"string"`
+
+ // A DNS name that is generated for the endpoint.
+ EndpointDomain *string `locationName:"endpointDomain" type:"string"`
+
+ // The type of Amazon Web Services Verified Access endpoint. Incoming application
+ // requests will be sent to an IP address, load balancer or a network interface
+ // depending on the endpoint type specified.
+ EndpointType *string `locationName:"endpointType" type:"string" enum:"VerifiedAccessEndpointType"`
+
+ // The last updated time.
+ LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"`
+
+ // The load balancer details if creating the Amazon Web Services Verified Access
+ // endpoint as load-balancer type.
+ LoadBalancerOptions *VerifiedAccessEndpointLoadBalancerOptions `locationName:"loadBalancerOptions" type:"structure"`
+
+ // The options for network-interface type endpoint.
+ NetworkInterfaceOptions *VerifiedAccessEndpointEniOptions `locationName:"networkInterfaceOptions" type:"structure"`
+
+ // The IDs of the security groups for the endpoint.
+ SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"`
+
+ // The endpoint status.
+ Status *VerifiedAccessEndpointStatus `locationName:"status" type:"structure"`
+
+ // The tags.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The ID of the Amazon Web Services Verified Access endpoint.
+ VerifiedAccessEndpointId *string `locationName:"verifiedAccessEndpointId" type:"string"`
+
+ // The ID of the Amazon Web Services Verified Access group.
+ VerifiedAccessGroupId *string `locationName:"verifiedAccessGroupId" type:"string"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `locationName:"verifiedAccessInstanceId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpoint) GoString() string {
+ return s.String()
+}
+
+// SetApplicationDomain sets the ApplicationDomain field's value.
+func (s *VerifiedAccessEndpoint) SetApplicationDomain(v string) *VerifiedAccessEndpoint {
+ s.ApplicationDomain = &v
+ return s
+}
+
+// SetAttachmentType sets the AttachmentType field's value.
+func (s *VerifiedAccessEndpoint) SetAttachmentType(v string) *VerifiedAccessEndpoint {
+ s.AttachmentType = &v
+ return s
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *VerifiedAccessEndpoint) SetCreationTime(v string) *VerifiedAccessEndpoint {
+ s.CreationTime = &v
+ return s
+}
+
+// SetDeletionTime sets the DeletionTime field's value.
+func (s *VerifiedAccessEndpoint) SetDeletionTime(v string) *VerifiedAccessEndpoint {
+ s.DeletionTime = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *VerifiedAccessEndpoint) SetDescription(v string) *VerifiedAccessEndpoint {
+ s.Description = &v
+ return s
+}
+
+// SetDeviceValidationDomain sets the DeviceValidationDomain field's value.
+func (s *VerifiedAccessEndpoint) SetDeviceValidationDomain(v string) *VerifiedAccessEndpoint {
+ s.DeviceValidationDomain = &v
+ return s
+}
+
+// SetDomainCertificateArn sets the DomainCertificateArn field's value.
+func (s *VerifiedAccessEndpoint) SetDomainCertificateArn(v string) *VerifiedAccessEndpoint {
+ s.DomainCertificateArn = &v
+ return s
+}
+
+// SetEndpointDomain sets the EndpointDomain field's value.
+func (s *VerifiedAccessEndpoint) SetEndpointDomain(v string) *VerifiedAccessEndpoint {
+ s.EndpointDomain = &v
+ return s
+}
+
+// SetEndpointType sets the EndpointType field's value.
+func (s *VerifiedAccessEndpoint) SetEndpointType(v string) *VerifiedAccessEndpoint {
+ s.EndpointType = &v
+ return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *VerifiedAccessEndpoint) SetLastUpdatedTime(v string) *VerifiedAccessEndpoint {
+ s.LastUpdatedTime = &v
+ return s
+}
+
+// SetLoadBalancerOptions sets the LoadBalancerOptions field's value.
+func (s *VerifiedAccessEndpoint) SetLoadBalancerOptions(v *VerifiedAccessEndpointLoadBalancerOptions) *VerifiedAccessEndpoint {
+ s.LoadBalancerOptions = v
+ return s
+}
+
+// SetNetworkInterfaceOptions sets the NetworkInterfaceOptions field's value.
+func (s *VerifiedAccessEndpoint) SetNetworkInterfaceOptions(v *VerifiedAccessEndpointEniOptions) *VerifiedAccessEndpoint {
+ s.NetworkInterfaceOptions = v
+ return s
+}
+
+// SetSecurityGroupIds sets the SecurityGroupIds field's value.
+func (s *VerifiedAccessEndpoint) SetSecurityGroupIds(v []*string) *VerifiedAccessEndpoint {
+ s.SecurityGroupIds = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VerifiedAccessEndpoint) SetStatus(v *VerifiedAccessEndpointStatus) *VerifiedAccessEndpoint {
+ s.Status = v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VerifiedAccessEndpoint) SetTags(v []*Tag) *VerifiedAccessEndpoint {
+ s.Tags = v
+ return s
+}
+
+// SetVerifiedAccessEndpointId sets the VerifiedAccessEndpointId field's value.
+func (s *VerifiedAccessEndpoint) SetVerifiedAccessEndpointId(v string) *VerifiedAccessEndpoint {
+ s.VerifiedAccessEndpointId = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *VerifiedAccessEndpoint) SetVerifiedAccessGroupId(v string) *VerifiedAccessEndpoint {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *VerifiedAccessEndpoint) SetVerifiedAccessInstanceId(v string) *VerifiedAccessEndpoint {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
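+// Editor's sketch (illustrative only, not generated SDK code): the endpoint
+// structure is pointer-heavy, so a small helper that guards nil values is a
+// convenient way to read the nested status code from a described endpoint.
+// The helper name is the editor's own.
+func verifiedAccessEndpointStatusCode(ep *VerifiedAccessEndpoint) string {
+ if ep == nil || ep.Status == nil || ep.Status.Code == nil {
+ return ""
+ }
+ return *ep.Status.Code
+}
+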
+// Options for a network-interface type endpoint.
+type VerifiedAccessEndpointEniOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the network interface.
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+ // The IP port number.
+ Port *int64 `locationName:"port" min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `locationName:"protocol" type:"string" enum:"VerifiedAccessEndpointProtocol"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointEniOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointEniOptions) GoString() string {
+ return s.String()
+}
+
+// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
+func (s *VerifiedAccessEndpointEniOptions) SetNetworkInterfaceId(v string) *VerifiedAccessEndpointEniOptions {
+ s.NetworkInterfaceId = &v
+ return s
+}
+
+// SetPort sets the Port field's value.
+func (s *VerifiedAccessEndpointEniOptions) SetPort(v int64) *VerifiedAccessEndpointEniOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *VerifiedAccessEndpointEniOptions) SetProtocol(v string) *VerifiedAccessEndpointEniOptions {
+ s.Protocol = &v
+ return s
+}
+
+// Describes a load balancer when creating an Amazon Web Services Verified Access
+// endpoint using the load-balancer type.
+type VerifiedAccessEndpointLoadBalancerOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the load balancer.
+ LoadBalancerArn *string `locationName:"loadBalancerArn" type:"string"`
+
+ // The IP port number.
+ Port *int64 `locationName:"port" min:"1" type:"integer"`
+
+ // The IP protocol.
+ Protocol *string `locationName:"protocol" type:"string" enum:"VerifiedAccessEndpointProtocol"`
+
+ // The IDs of the subnets.
+ SubnetIds []*string `locationName:"subnetIdSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointLoadBalancerOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointLoadBalancerOptions) GoString() string {
+ return s.String()
+}
+
+// SetLoadBalancerArn sets the LoadBalancerArn field's value.
+func (s *VerifiedAccessEndpointLoadBalancerOptions) SetLoadBalancerArn(v string) *VerifiedAccessEndpointLoadBalancerOptions {
+ s.LoadBalancerArn = &v
+ return s
+}
+
+// SetPort sets the Port field's value.
+func (s *VerifiedAccessEndpointLoadBalancerOptions) SetPort(v int64) *VerifiedAccessEndpointLoadBalancerOptions {
+ s.Port = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *VerifiedAccessEndpointLoadBalancerOptions) SetProtocol(v string) *VerifiedAccessEndpointLoadBalancerOptions {
+ s.Protocol = &v
+ return s
+}
+
+// SetSubnetIds sets the SubnetIds field's value.
+func (s *VerifiedAccessEndpointLoadBalancerOptions) SetSubnetIds(v []*string) *VerifiedAccessEndpointLoadBalancerOptions {
+ s.SubnetIds = v
+ return s
+}
+
+// Describes the status of a Verified Access endpoint.
+type VerifiedAccessEndpointStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The status code of the Verified Access endpoint.
+ Code *string `locationName:"code" type:"string" enum:"VerifiedAccessEndpointStatusCode"`
+
+ // The status message of the Verified Access endpoint.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessEndpointStatus) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *VerifiedAccessEndpointStatus) SetCode(v string) *VerifiedAccessEndpointStatus {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *VerifiedAccessEndpointStatus) SetMessage(v string) *VerifiedAccessEndpointStatus {
+ s.Message = &v
+ return s
+}
+
+// Describes a Verified Access group.
+type VerifiedAccessGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The creation time.
+ CreationTime *string `locationName:"creationTime" type:"string"`
+
+ // The deletion time.
+ DeletionTime *string `locationName:"deletionTime" type:"string"`
+
+ // A description for the Amazon Web Services Verified Access group.
+ Description *string `locationName:"description" type:"string"`
+
+ // The last updated time.
+ LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"`
+
+ // The Amazon Web Services account number that owns the group.
+ Owner *string `locationName:"owner" type:"string"`
+
+ // The tags.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The ARN of the Verified Access group.
+ VerifiedAccessGroupArn *string `locationName:"verifiedAccessGroupArn" type:"string"`
+
+ // The ID of the Verified Access group.
+ VerifiedAccessGroupId *string `locationName:"verifiedAccessGroupId" type:"string"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `locationName:"verifiedAccessInstanceId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessGroup) GoString() string {
+ return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *VerifiedAccessGroup) SetCreationTime(v string) *VerifiedAccessGroup {
+ s.CreationTime = &v
+ return s
+}
+
+// SetDeletionTime sets the DeletionTime field's value.
+func (s *VerifiedAccessGroup) SetDeletionTime(v string) *VerifiedAccessGroup {
+ s.DeletionTime = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *VerifiedAccessGroup) SetDescription(v string) *VerifiedAccessGroup {
+ s.Description = &v
+ return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *VerifiedAccessGroup) SetLastUpdatedTime(v string) *VerifiedAccessGroup {
+ s.LastUpdatedTime = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *VerifiedAccessGroup) SetOwner(v string) *VerifiedAccessGroup {
+ s.Owner = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VerifiedAccessGroup) SetTags(v []*Tag) *VerifiedAccessGroup {
+ s.Tags = v
+ return s
+}
+
+// SetVerifiedAccessGroupArn sets the VerifiedAccessGroupArn field's value.
+func (s *VerifiedAccessGroup) SetVerifiedAccessGroupArn(v string) *VerifiedAccessGroup {
+ s.VerifiedAccessGroupArn = &v
+ return s
+}
+
+// SetVerifiedAccessGroupId sets the VerifiedAccessGroupId field's value.
+func (s *VerifiedAccessGroup) SetVerifiedAccessGroupId(v string) *VerifiedAccessGroup {
+ s.VerifiedAccessGroupId = &v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *VerifiedAccessGroup) SetVerifiedAccessInstanceId(v string) *VerifiedAccessGroup {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+// Describes a Verified Access instance.
+type VerifiedAccessInstance struct {
+ _ struct{} `type:"structure"`
+
+ // The creation time.
+ CreationTime *string `locationName:"creationTime" type:"string"`
+
+ // A description for the Amazon Web Services Verified Access instance.
+ Description *string `locationName:"description" type:"string"`
+
+ // The last updated time.
+ LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"`
+
+ // The tags.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `locationName:"verifiedAccessInstanceId" type:"string"`
+
+ // The IDs of the Amazon Web Services Verified Access trust providers.
+ VerifiedAccessTrustProviders []*VerifiedAccessTrustProviderCondensed `locationName:"verifiedAccessTrustProviderSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessInstance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessInstance) GoString() string {
+ return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *VerifiedAccessInstance) SetCreationTime(v string) *VerifiedAccessInstance {
+ s.CreationTime = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *VerifiedAccessInstance) SetDescription(v string) *VerifiedAccessInstance {
+ s.Description = &v
+ return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *VerifiedAccessInstance) SetLastUpdatedTime(v string) *VerifiedAccessInstance {
+ s.LastUpdatedTime = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VerifiedAccessInstance) SetTags(v []*Tag) *VerifiedAccessInstance {
+ s.Tags = v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *VerifiedAccessInstance) SetVerifiedAccessInstanceId(v string) *VerifiedAccessInstance {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviders sets the VerifiedAccessTrustProviders field's value.
+func (s *VerifiedAccessInstance) SetVerifiedAccessTrustProviders(v []*VerifiedAccessTrustProviderCondensed) *VerifiedAccessInstance {
+ s.VerifiedAccessTrustProviders = v
+ return s
+}
+
+// Describes logging options for an Amazon Web Services Verified Access instance.
+type VerifiedAccessInstanceLoggingConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Details about the logging options.
+ AccessLogs *VerifiedAccessLogs `locationName:"accessLogs" type:"structure"`
+
+ // The ID of the Amazon Web Services Verified Access instance.
+ VerifiedAccessInstanceId *string `locationName:"verifiedAccessInstanceId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessInstanceLoggingConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessInstanceLoggingConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetAccessLogs sets the AccessLogs field's value.
+func (s *VerifiedAccessInstanceLoggingConfiguration) SetAccessLogs(v *VerifiedAccessLogs) *VerifiedAccessInstanceLoggingConfiguration {
+ s.AccessLogs = v
+ return s
+}
+
+// SetVerifiedAccessInstanceId sets the VerifiedAccessInstanceId field's value.
+func (s *VerifiedAccessInstanceLoggingConfiguration) SetVerifiedAccessInstanceId(v string) *VerifiedAccessInstanceLoggingConfiguration {
+ s.VerifiedAccessInstanceId = &v
+ return s
+}
+
+// Options for CloudWatch Logs as a logging destination.
+type VerifiedAccessLogCloudWatchLogsDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The delivery status for access logs.
+ DeliveryStatus *VerifiedAccessLogDeliveryStatus `locationName:"deliveryStatus" type:"structure"`
+
+ // Indicates whether logging is enabled.
+ Enabled *bool `locationName:"enabled" type:"boolean"`
+
+ // The ID of the CloudWatch Logs log group.
+ LogGroup *string `locationName:"logGroup" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogCloudWatchLogsDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogCloudWatchLogsDestination) GoString() string {
+ return s.String()
+}
+
+// SetDeliveryStatus sets the DeliveryStatus field's value.
+func (s *VerifiedAccessLogCloudWatchLogsDestination) SetDeliveryStatus(v *VerifiedAccessLogDeliveryStatus) *VerifiedAccessLogCloudWatchLogsDestination {
+ s.DeliveryStatus = v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogCloudWatchLogsDestination) SetEnabled(v bool) *VerifiedAccessLogCloudWatchLogsDestination {
+ s.Enabled = &v
+ return s
+}
+
+// SetLogGroup sets the LogGroup field's value.
+func (s *VerifiedAccessLogCloudWatchLogsDestination) SetLogGroup(v string) *VerifiedAccessLogCloudWatchLogsDestination {
+ s.LogGroup = &v
+ return s
+}
+
+// Options for CloudWatch Logs as a logging destination.
+type VerifiedAccessLogCloudWatchLogsDestinationOptions struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether logging is enabled.
+ //
+ // Enabled is a required field
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // The ID of the CloudWatch Logs log group.
+ LogGroup *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogCloudWatchLogsDestinationOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogCloudWatchLogsDestinationOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *VerifiedAccessLogCloudWatchLogsDestinationOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "VerifiedAccessLogCloudWatchLogsDestinationOptions"}
+ if s.Enabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("Enabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogCloudWatchLogsDestinationOptions) SetEnabled(v bool) *VerifiedAccessLogCloudWatchLogsDestinationOptions {
+ s.Enabled = &v
+ return s
+}
+
+// SetLogGroup sets the LogGroup field's value.
+func (s *VerifiedAccessLogCloudWatchLogsDestinationOptions) SetLogGroup(v string) *VerifiedAccessLogCloudWatchLogsDestinationOptions {
+ s.LogGroup = &v
+ return s
+}
+
+// Describes a log delivery status.
+type VerifiedAccessLogDeliveryStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The status code.
+ Code *string `locationName:"code" type:"string" enum:"VerifiedAccessLogDeliveryStatusCode"`
+
+ // The status message.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogDeliveryStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogDeliveryStatus) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *VerifiedAccessLogDeliveryStatus) SetCode(v string) *VerifiedAccessLogDeliveryStatus {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *VerifiedAccessLogDeliveryStatus) SetMessage(v string) *VerifiedAccessLogDeliveryStatus {
+ s.Message = &v
+ return s
+}
+
+// Options for Kinesis as a logging destination.
+type VerifiedAccessLogKinesisDataFirehoseDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The delivery status.
+ DeliveryStatus *VerifiedAccessLogDeliveryStatus `locationName:"deliveryStatus" type:"structure"`
+
+ // The ID of the delivery stream.
+ DeliveryStream *string `locationName:"deliveryStream" type:"string"`
+
+ // Indicates whether logging is enabled.
+ Enabled *bool `locationName:"enabled" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogKinesisDataFirehoseDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogKinesisDataFirehoseDestination) GoString() string {
+ return s.String()
+}
+
+// SetDeliveryStatus sets the DeliveryStatus field's value.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestination) SetDeliveryStatus(v *VerifiedAccessLogDeliveryStatus) *VerifiedAccessLogKinesisDataFirehoseDestination {
+ s.DeliveryStatus = v
+ return s
+}
+
+// SetDeliveryStream sets the DeliveryStream field's value.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestination) SetDeliveryStream(v string) *VerifiedAccessLogKinesisDataFirehoseDestination {
+ s.DeliveryStream = &v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestination) SetEnabled(v bool) *VerifiedAccessLogKinesisDataFirehoseDestination {
+ s.Enabled = &v
+ return s
+}
+
+// Describes Amazon Kinesis Data Firehose logging options.
+type VerifiedAccessLogKinesisDataFirehoseDestinationOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the delivery stream.
+ DeliveryStream *string `type:"string"`
+
+ // Indicates whether logging is enabled.
+ //
+ // Enabled is a required field
+ Enabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogKinesisDataFirehoseDestinationOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogKinesisDataFirehoseDestinationOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "VerifiedAccessLogKinesisDataFirehoseDestinationOptions"}
+ if s.Enabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("Enabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDeliveryStream sets the DeliveryStream field's value.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) SetDeliveryStream(v string) *VerifiedAccessLogKinesisDataFirehoseDestinationOptions {
+ s.DeliveryStream = &v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) SetEnabled(v bool) *VerifiedAccessLogKinesisDataFirehoseDestinationOptions {
+ s.Enabled = &v
+ return s
+}
+
+// Describes the destinations for Verified Access logs.
+type VerifiedAccessLogOptions struct {
+ _ struct{} `type:"structure"`
+
+ // Sends Verified Access logs to CloudWatch Logs.
+ CloudWatchLogs *VerifiedAccessLogCloudWatchLogsDestinationOptions `type:"structure"`
+
+ // Sends Verified Access logs to Kinesis.
+ KinesisDataFirehose *VerifiedAccessLogKinesisDataFirehoseDestinationOptions `type:"structure"`
+
+ // Sends Verified Access logs to Amazon S3.
+ S3 *VerifiedAccessLogS3DestinationOptions `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *VerifiedAccessLogOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "VerifiedAccessLogOptions"}
+ if s.CloudWatchLogs != nil {
+ if err := s.CloudWatchLogs.Validate(); err != nil {
+ invalidParams.AddNested("CloudWatchLogs", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.KinesisDataFirehose != nil {
+ if err := s.KinesisDataFirehose.Validate(); err != nil {
+ invalidParams.AddNested("KinesisDataFirehose", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.S3 != nil {
+ if err := s.S3.Validate(); err != nil {
+ invalidParams.AddNested("S3", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCloudWatchLogs sets the CloudWatchLogs field's value.
+func (s *VerifiedAccessLogOptions) SetCloudWatchLogs(v *VerifiedAccessLogCloudWatchLogsDestinationOptions) *VerifiedAccessLogOptions {
+ s.CloudWatchLogs = v
+ return s
+}
+
+// SetKinesisDataFirehose sets the KinesisDataFirehose field's value.
+func (s *VerifiedAccessLogOptions) SetKinesisDataFirehose(v *VerifiedAccessLogKinesisDataFirehoseDestinationOptions) *VerifiedAccessLogOptions {
+ s.KinesisDataFirehose = v
+ return s
+}
+
+// SetS3 sets the S3 field's value.
+func (s *VerifiedAccessLogOptions) SetS3(v *VerifiedAccessLogS3DestinationOptions) *VerifiedAccessLogOptions {
+ s.S3 = v
+ return s
+}
+
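+// Editor's sketch (illustrative only, not generated SDK code): configures a
+// CloudWatch Logs destination for Verified Access logs. Enabled is the only
+// required field on the nested options, which is exactly what the nested
+// Validate calls above enforce. The log group name is a placeholder.
+func exampleVerifiedAccessLogOptions() (*VerifiedAccessLogOptions, error) {
+ enabled := true
+ logGroup := "verified-access-logs"
+ opts := &VerifiedAccessLogOptions{
+ CloudWatchLogs: &VerifiedAccessLogCloudWatchLogsDestinationOptions{
+ Enabled: &enabled,
+ LogGroup: &logGroup,
+ },
+ }
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+ return opts, nil
+}
+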
+// Options for Amazon S3 as a logging destination.
+type VerifiedAccessLogS3Destination struct {
+ _ struct{} `type:"structure"`
+
+ // The bucket name.
+ BucketName *string `locationName:"bucketName" type:"string"`
+
+ // The Amazon Web Services account number that owns the bucket.
+ BucketOwner *string `locationName:"bucketOwner" type:"string"`
+
+ // The delivery status.
+ DeliveryStatus *VerifiedAccessLogDeliveryStatus `locationName:"deliveryStatus" type:"structure"`
+
+ // Indicates whether logging is enabled.
+ Enabled *bool `locationName:"enabled" type:"boolean"`
+
+ // The bucket prefix.
+ Prefix *string `locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogS3Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogS3Destination) GoString() string {
+ return s.String()
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *VerifiedAccessLogS3Destination) SetBucketName(v string) *VerifiedAccessLogS3Destination {
+ s.BucketName = &v
+ return s
+}
+
+// SetBucketOwner sets the BucketOwner field's value.
+func (s *VerifiedAccessLogS3Destination) SetBucketOwner(v string) *VerifiedAccessLogS3Destination {
+ s.BucketOwner = &v
+ return s
+}
+
+// SetDeliveryStatus sets the DeliveryStatus field's value.
+func (s *VerifiedAccessLogS3Destination) SetDeliveryStatus(v *VerifiedAccessLogDeliveryStatus) *VerifiedAccessLogS3Destination {
+ s.DeliveryStatus = v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogS3Destination) SetEnabled(v bool) *VerifiedAccessLogS3Destination {
+ s.Enabled = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *VerifiedAccessLogS3Destination) SetPrefix(v string) *VerifiedAccessLogS3Destination {
+ s.Prefix = &v
+ return s
+}
+
+// Options for Amazon S3 as a logging destination.
+type VerifiedAccessLogS3DestinationOptions struct {
+ _ struct{} `type:"structure"`
+
+ // The bucket name.
+ BucketName *string `type:"string"`
+
+ // The ID of the Amazon Web Services account that owns the Amazon S3 bucket.
+ BucketOwner *string `type:"string"`
+
+ // Indicates whether logging is enabled.
+ //
+ // Enabled is a required field
+ Enabled *bool `type:"boolean" required:"true"`
+
+ // The bucket prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogS3DestinationOptions) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogS3DestinationOptions) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *VerifiedAccessLogS3DestinationOptions) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "VerifiedAccessLogS3DestinationOptions"}
+ if s.Enabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("Enabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *VerifiedAccessLogS3DestinationOptions) SetBucketName(v string) *VerifiedAccessLogS3DestinationOptions {
+ s.BucketName = &v
+ return s
+}
+
+// SetBucketOwner sets the BucketOwner field's value.
+func (s *VerifiedAccessLogS3DestinationOptions) SetBucketOwner(v string) *VerifiedAccessLogS3DestinationOptions {
+ s.BucketOwner = &v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *VerifiedAccessLogS3DestinationOptions) SetEnabled(v bool) *VerifiedAccessLogS3DestinationOptions {
+ s.Enabled = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *VerifiedAccessLogS3DestinationOptions) SetPrefix(v string) *VerifiedAccessLogS3DestinationOptions {
+ s.Prefix = &v
+ return s
+}
+
+// Describes the destinations for Verified Access logs.
+type VerifiedAccessLogs struct {
+ _ struct{} `type:"structure"`
+
+ // CloudWatch Logs logging destination.
+ CloudWatchLogs *VerifiedAccessLogCloudWatchLogsDestination `locationName:"cloudWatchLogs" type:"structure"`
+
+ // Kinesis logging destination.
+ KinesisDataFirehose *VerifiedAccessLogKinesisDataFirehoseDestination `locationName:"kinesisDataFirehose" type:"structure"`
+
+ // Amazon S3 logging options.
+ S3 *VerifiedAccessLogS3Destination `locationName:"s3" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogs) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessLogs) GoString() string {
+ return s.String()
+}
+
+// SetCloudWatchLogs sets the CloudWatchLogs field's value.
+func (s *VerifiedAccessLogs) SetCloudWatchLogs(v *VerifiedAccessLogCloudWatchLogsDestination) *VerifiedAccessLogs {
+ s.CloudWatchLogs = v
+ return s
+}
+
+// SetKinesisDataFirehose sets the KinesisDataFirehose field's value.
+func (s *VerifiedAccessLogs) SetKinesisDataFirehose(v *VerifiedAccessLogKinesisDataFirehoseDestination) *VerifiedAccessLogs {
+ s.KinesisDataFirehose = v
+ return s
+}
+
+// SetS3 sets the S3 field's value.
+func (s *VerifiedAccessLogs) SetS3(v *VerifiedAccessLogS3Destination) *VerifiedAccessLogs {
+ s.S3 = v
+ return s
+}
+
+// Describes a Verified Access trust provider.
+type VerifiedAccessTrustProvider struct {
+ _ struct{} `type:"structure"`
+
+ // The creation time.
+ CreationTime *string `locationName:"creationTime" type:"string"`
+
+ // A description for the Amazon Web Services Verified Access trust provider.
+ Description *string `locationName:"description" type:"string"`
+
+ // The options for device-identity type trust provider.
+ DeviceOptions *DeviceOptions `locationName:"deviceOptions" type:"structure"`
+
+ // The type of device-based trust provider.
+ DeviceTrustProviderType *string `locationName:"deviceTrustProviderType" type:"string" enum:"DeviceTrustProviderType"`
+
+ // The last updated time.
+ LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"`
+
+ // The OpenID Connect details for an oidc-type, user-identity based trust provider.
+ OidcOptions *OidcOptions `locationName:"oidcOptions" type:"structure"`
+
+ // The identifier to be used when working with policy rules.
+ PolicyReferenceName *string `locationName:"policyReferenceName" type:"string"`
+
+ // The tags.
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+ // The type of Verified Access trust provider.
+ TrustProviderType *string `locationName:"trustProviderType" type:"string" enum:"TrustProviderType"`
+
+ // The type of user-based trust provider.
+ UserTrustProviderType *string `locationName:"userTrustProviderType" type:"string" enum:"UserTrustProviderType"`
+
+ // The ID of the Amazon Web Services Verified Access trust provider.
+ VerifiedAccessTrustProviderId *string `locationName:"verifiedAccessTrustProviderId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessTrustProvider) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessTrustProvider) GoString() string {
+ return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *VerifiedAccessTrustProvider) SetCreationTime(v string) *VerifiedAccessTrustProvider {
+ s.CreationTime = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *VerifiedAccessTrustProvider) SetDescription(v string) *VerifiedAccessTrustProvider {
+ s.Description = &v
+ return s
+}
+
+// SetDeviceOptions sets the DeviceOptions field's value.
+func (s *VerifiedAccessTrustProvider) SetDeviceOptions(v *DeviceOptions) *VerifiedAccessTrustProvider {
+ s.DeviceOptions = v
+ return s
+}
+
+// SetDeviceTrustProviderType sets the DeviceTrustProviderType field's value.
+func (s *VerifiedAccessTrustProvider) SetDeviceTrustProviderType(v string) *VerifiedAccessTrustProvider {
+ s.DeviceTrustProviderType = &v
+ return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *VerifiedAccessTrustProvider) SetLastUpdatedTime(v string) *VerifiedAccessTrustProvider {
+ s.LastUpdatedTime = &v
+ return s
+}
+
+// SetOidcOptions sets the OidcOptions field's value.
+func (s *VerifiedAccessTrustProvider) SetOidcOptions(v *OidcOptions) *VerifiedAccessTrustProvider {
+ s.OidcOptions = v
+ return s
+}
+
+// SetPolicyReferenceName sets the PolicyReferenceName field's value.
+func (s *VerifiedAccessTrustProvider) SetPolicyReferenceName(v string) *VerifiedAccessTrustProvider {
+ s.PolicyReferenceName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VerifiedAccessTrustProvider) SetTags(v []*Tag) *VerifiedAccessTrustProvider {
+ s.Tags = v
+ return s
+}
+
+// SetTrustProviderType sets the TrustProviderType field's value.
+func (s *VerifiedAccessTrustProvider) SetTrustProviderType(v string) *VerifiedAccessTrustProvider {
+ s.TrustProviderType = &v
+ return s
+}
+
+// SetUserTrustProviderType sets the UserTrustProviderType field's value.
+func (s *VerifiedAccessTrustProvider) SetUserTrustProviderType(v string) *VerifiedAccessTrustProvider {
+ s.UserTrustProviderType = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *VerifiedAccessTrustProvider) SetVerifiedAccessTrustProviderId(v string) *VerifiedAccessTrustProvider {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
+// Condensed information about a trust provider.
+type VerifiedAccessTrustProviderCondensed struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the trust provider.
+ Description *string `locationName:"description" type:"string"`
+
+ // The type of device-based trust provider.
+ DeviceTrustProviderType *string `locationName:"deviceTrustProviderType" type:"string" enum:"DeviceTrustProviderType"`
+
+ // The type of trust provider (user- or device-based).
+ TrustProviderType *string `locationName:"trustProviderType" type:"string" enum:"TrustProviderType"`
+
+ // The type of user-based trust provider.
+ UserTrustProviderType *string `locationName:"userTrustProviderType" type:"string" enum:"UserTrustProviderType"`
+
+ // The ID of the trust provider.
+ VerifiedAccessTrustProviderId *string `locationName:"verifiedAccessTrustProviderId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessTrustProviderCondensed) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VerifiedAccessTrustProviderCondensed) GoString() string {
+ return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *VerifiedAccessTrustProviderCondensed) SetDescription(v string) *VerifiedAccessTrustProviderCondensed {
+ s.Description = &v
+ return s
+}
+
+// SetDeviceTrustProviderType sets the DeviceTrustProviderType field's value.
+func (s *VerifiedAccessTrustProviderCondensed) SetDeviceTrustProviderType(v string) *VerifiedAccessTrustProviderCondensed {
+ s.DeviceTrustProviderType = &v
+ return s
+}
+
+// SetTrustProviderType sets the TrustProviderType field's value.
+func (s *VerifiedAccessTrustProviderCondensed) SetTrustProviderType(v string) *VerifiedAccessTrustProviderCondensed {
+ s.TrustProviderType = &v
+ return s
+}
+
+// SetUserTrustProviderType sets the UserTrustProviderType field's value.
+func (s *VerifiedAccessTrustProviderCondensed) SetUserTrustProviderType(v string) *VerifiedAccessTrustProviderCondensed {
+ s.UserTrustProviderType = &v
+ return s
+}
+
+// SetVerifiedAccessTrustProviderId sets the VerifiedAccessTrustProviderId field's value.
+func (s *VerifiedAccessTrustProviderCondensed) SetVerifiedAccessTrustProviderId(v string) *VerifiedAccessTrustProviderCondensed {
+ s.VerifiedAccessTrustProviderId = &v
+ return s
+}
+
// Describes telemetry for a VPN tunnel.
type VgwTelemetry struct {
_ struct{} `type:"structure"`
@@ -166949,7 +179196,7 @@ type VpcEndpoint struct {
// The last error that occurred for endpoint.
LastError *LastError `locationName:"lastError" type:"structure"`
- // (Interface endpoint) One or more network interfaces for the endpoint.
+ // (Interface endpoint) The network interfaces for the endpoint.
NetworkInterfaceIds []*string `locationName:"networkInterfaceIdSet" locationNameList:"item" type:"list"`
// The ID of the Amazon Web Services account that owns the endpoint.
@@ -166965,7 +179212,7 @@ type VpcEndpoint struct {
// Indicates whether the endpoint is being managed by its service.
RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"`
- // (Gateway endpoint) One or more route tables associated with the endpoint.
+ // (Gateway endpoint) The IDs of the route tables associated with the endpoint.
RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"`
// The name of the service to which the endpoint is associated.
@@ -166977,7 +179224,7 @@ type VpcEndpoint struct {
// (Interface endpoint) The subnets for the endpoint.
SubnetIds []*string `locationName:"subnetIdSet" locationNameList:"item" type:"list"`
- // Any tags assigned to the endpoint.
+ // The tags assigned to the endpoint.
Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
// The ID of the endpoint.
@@ -168279,6 +180526,9 @@ type VpnTunnelOptionsSpecification struct {
// Default: 30
DPDTimeoutSeconds *int64 `type:"integer"`
+ // Turn on or off the tunnel endpoint lifecycle control feature.
+ EnableTunnelLifecycleControl *bool `type:"boolean"`
+
// The IKE versions that are permitted for the VPN tunnel.
//
// Valid values: ikev1 | ikev2
@@ -168440,6 +180690,12 @@ func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *VpnTunnel
return s
}
+// SetEnableTunnelLifecycleControl sets the EnableTunnelLifecycleControl field's value.
+func (s *VpnTunnelOptionsSpecification) SetEnableTunnelLifecycleControl(v bool) *VpnTunnelOptionsSpecification {
+ s.EnableTunnelLifecycleControl = &v
+ return s
+}
+
// SetIKEVersions sets the IKEVersions field's value.
func (s *VpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *VpnTunnelOptionsSpecification {
s.IKEVersions = v
@@ -169253,6 +181509,9 @@ const (
// BootModeValuesUefi is a BootModeValues enum value
BootModeValuesUefi = "uefi"
+
+ // BootModeValuesUefiPreferred is a BootModeValues enum value
+ BootModeValuesUefiPreferred = "uefi-preferred"
)
// BootModeValues_Values returns all elements of the BootModeValues enum
@@ -169260,6 +181519,7 @@ func BootModeValues_Values() []string {
return []string{
BootModeValuesLegacyBios,
BootModeValuesUefi,
+ BootModeValuesUefiPreferred,
}
}
@@ -170011,6 +182271,22 @@ func DestinationFileFormat_Values() []string {
}
}
+const (
+ // DeviceTrustProviderTypeJamf is a DeviceTrustProviderType enum value
+ DeviceTrustProviderTypeJamf = "jamf"
+
+ // DeviceTrustProviderTypeCrowdstrike is a DeviceTrustProviderType enum value
+ DeviceTrustProviderTypeCrowdstrike = "crowdstrike"
+)
+
+// DeviceTrustProviderType_Values returns all elements of the DeviceTrustProviderType enum
+func DeviceTrustProviderType_Values() []string {
+ return []string{
+ DeviceTrustProviderTypeJamf,
+ DeviceTrustProviderTypeCrowdstrike,
+ }
+}
+
const (
// DeviceTypeEbs is a DeviceType enum value
DeviceTypeEbs = "ebs"
@@ -170795,6 +183071,22 @@ func GatewayType_Values() []string {
}
}
+const (
+ // HostMaintenanceOn is a HostMaintenance enum value
+ HostMaintenanceOn = "on"
+
+ // HostMaintenanceOff is a HostMaintenance enum value
+ HostMaintenanceOff = "off"
+)
+
+// HostMaintenance_Values returns all elements of the HostMaintenance enum
+func HostMaintenance_Values() []string {
+ return []string{
+ HostMaintenanceOn,
+ HostMaintenanceOff,
+ }
+}
+
const (
// HostRecoveryOn is a HostRecovery enum value
HostRecoveryOn = "on"
@@ -171127,6 +183419,22 @@ func InstanceAutoRecoveryState_Values() []string {
}
}
+const (
+ // InstanceBootModeValuesLegacyBios is a InstanceBootModeValues enum value
+ InstanceBootModeValuesLegacyBios = "legacy-bios"
+
+ // InstanceBootModeValuesUefi is a InstanceBootModeValues enum value
+ InstanceBootModeValuesUefi = "uefi"
+)
+
+// InstanceBootModeValues_Values returns all elements of the InstanceBootModeValues enum
+func InstanceBootModeValues_Values() []string {
+ return []string{
+ InstanceBootModeValuesLegacyBios,
+ InstanceBootModeValuesUefi,
+ }
+}
+
const (
// InstanceEventWindowStateCreating is a InstanceEventWindowState enum value
InstanceEventWindowStateCreating = "creating"
@@ -173082,6 +185390,216 @@ const (
// InstanceTypeTrn132xlarge is a InstanceType enum value
InstanceTypeTrn132xlarge = "trn1.32xlarge"
+
+ // InstanceTypeHpc6id32xlarge is a InstanceType enum value
+ InstanceTypeHpc6id32xlarge = "hpc6id.32xlarge"
+
+ // InstanceTypeC6inLarge is a InstanceType enum value
+ InstanceTypeC6inLarge = "c6in.large"
+
+ // InstanceTypeC6inXlarge is a InstanceType enum value
+ InstanceTypeC6inXlarge = "c6in.xlarge"
+
+ // InstanceTypeC6in2xlarge is a InstanceType enum value
+ InstanceTypeC6in2xlarge = "c6in.2xlarge"
+
+ // InstanceTypeC6in4xlarge is a InstanceType enum value
+ InstanceTypeC6in4xlarge = "c6in.4xlarge"
+
+ // InstanceTypeC6in8xlarge is a InstanceType enum value
+ InstanceTypeC6in8xlarge = "c6in.8xlarge"
+
+ // InstanceTypeC6in12xlarge is a InstanceType enum value
+ InstanceTypeC6in12xlarge = "c6in.12xlarge"
+
+ // InstanceTypeC6in16xlarge is a InstanceType enum value
+ InstanceTypeC6in16xlarge = "c6in.16xlarge"
+
+ // InstanceTypeC6in24xlarge is a InstanceType enum value
+ InstanceTypeC6in24xlarge = "c6in.24xlarge"
+
+ // InstanceTypeC6in32xlarge is a InstanceType enum value
+ InstanceTypeC6in32xlarge = "c6in.32xlarge"
+
+ // InstanceTypeM6inLarge is a InstanceType enum value
+ InstanceTypeM6inLarge = "m6in.large"
+
+ // InstanceTypeM6inXlarge is a InstanceType enum value
+ InstanceTypeM6inXlarge = "m6in.xlarge"
+
+ // InstanceTypeM6in2xlarge is a InstanceType enum value
+ InstanceTypeM6in2xlarge = "m6in.2xlarge"
+
+ // InstanceTypeM6in4xlarge is a InstanceType enum value
+ InstanceTypeM6in4xlarge = "m6in.4xlarge"
+
+ // InstanceTypeM6in8xlarge is a InstanceType enum value
+ InstanceTypeM6in8xlarge = "m6in.8xlarge"
+
+ // InstanceTypeM6in12xlarge is a InstanceType enum value
+ InstanceTypeM6in12xlarge = "m6in.12xlarge"
+
+ // InstanceTypeM6in16xlarge is a InstanceType enum value
+ InstanceTypeM6in16xlarge = "m6in.16xlarge"
+
+ // InstanceTypeM6in24xlarge is a InstanceType enum value
+ InstanceTypeM6in24xlarge = "m6in.24xlarge"
+
+ // InstanceTypeM6in32xlarge is a InstanceType enum value
+ InstanceTypeM6in32xlarge = "m6in.32xlarge"
+
+ // InstanceTypeM6idnLarge is a InstanceType enum value
+ InstanceTypeM6idnLarge = "m6idn.large"
+
+ // InstanceTypeM6idnXlarge is a InstanceType enum value
+ InstanceTypeM6idnXlarge = "m6idn.xlarge"
+
+ // InstanceTypeM6idn2xlarge is a InstanceType enum value
+ InstanceTypeM6idn2xlarge = "m6idn.2xlarge"
+
+ // InstanceTypeM6idn4xlarge is a InstanceType enum value
+ InstanceTypeM6idn4xlarge = "m6idn.4xlarge"
+
+ // InstanceTypeM6idn8xlarge is a InstanceType enum value
+ InstanceTypeM6idn8xlarge = "m6idn.8xlarge"
+
+ // InstanceTypeM6idn12xlarge is a InstanceType enum value
+ InstanceTypeM6idn12xlarge = "m6idn.12xlarge"
+
+ // InstanceTypeM6idn16xlarge is a InstanceType enum value
+ InstanceTypeM6idn16xlarge = "m6idn.16xlarge"
+
+ // InstanceTypeM6idn24xlarge is a InstanceType enum value
+ InstanceTypeM6idn24xlarge = "m6idn.24xlarge"
+
+ // InstanceTypeM6idn32xlarge is a InstanceType enum value
+ InstanceTypeM6idn32xlarge = "m6idn.32xlarge"
+
+ // InstanceTypeR6inLarge is a InstanceType enum value
+ InstanceTypeR6inLarge = "r6in.large"
+
+ // InstanceTypeR6inXlarge is a InstanceType enum value
+ InstanceTypeR6inXlarge = "r6in.xlarge"
+
+ // InstanceTypeR6in2xlarge is a InstanceType enum value
+ InstanceTypeR6in2xlarge = "r6in.2xlarge"
+
+ // InstanceTypeR6in4xlarge is a InstanceType enum value
+ InstanceTypeR6in4xlarge = "r6in.4xlarge"
+
+ // InstanceTypeR6in8xlarge is a InstanceType enum value
+ InstanceTypeR6in8xlarge = "r6in.8xlarge"
+
+ // InstanceTypeR6in12xlarge is a InstanceType enum value
+ InstanceTypeR6in12xlarge = "r6in.12xlarge"
+
+ // InstanceTypeR6in16xlarge is a InstanceType enum value
+ InstanceTypeR6in16xlarge = "r6in.16xlarge"
+
+ // InstanceTypeR6in24xlarge is a InstanceType enum value
+ InstanceTypeR6in24xlarge = "r6in.24xlarge"
+
+ // InstanceTypeR6in32xlarge is a InstanceType enum value
+ InstanceTypeR6in32xlarge = "r6in.32xlarge"
+
+ // InstanceTypeR6idnLarge is a InstanceType enum value
+ InstanceTypeR6idnLarge = "r6idn.large"
+
+ // InstanceTypeR6idnXlarge is a InstanceType enum value
+ InstanceTypeR6idnXlarge = "r6idn.xlarge"
+
+ // InstanceTypeR6idn2xlarge is a InstanceType enum value
+ InstanceTypeR6idn2xlarge = "r6idn.2xlarge"
+
+ // InstanceTypeR6idn4xlarge is a InstanceType enum value
+ InstanceTypeR6idn4xlarge = "r6idn.4xlarge"
+
+ // InstanceTypeR6idn8xlarge is a InstanceType enum value
+ InstanceTypeR6idn8xlarge = "r6idn.8xlarge"
+
+ // InstanceTypeR6idn12xlarge is a InstanceType enum value
+ InstanceTypeR6idn12xlarge = "r6idn.12xlarge"
+
+ // InstanceTypeR6idn16xlarge is a InstanceType enum value
+ InstanceTypeR6idn16xlarge = "r6idn.16xlarge"
+
+ // InstanceTypeR6idn24xlarge is a InstanceType enum value
+ InstanceTypeR6idn24xlarge = "r6idn.24xlarge"
+
+ // InstanceTypeR6idn32xlarge is a InstanceType enum value
+ InstanceTypeR6idn32xlarge = "r6idn.32xlarge"
+
+ // InstanceTypeC7gMetal is a InstanceType enum value
+ InstanceTypeC7gMetal = "c7g.metal"
+
+ // InstanceTypeM7gMedium is a InstanceType enum value
+ InstanceTypeM7gMedium = "m7g.medium"
+
+ // InstanceTypeM7gLarge is a InstanceType enum value
+ InstanceTypeM7gLarge = "m7g.large"
+
+ // InstanceTypeM7gXlarge is a InstanceType enum value
+ InstanceTypeM7gXlarge = "m7g.xlarge"
+
+ // InstanceTypeM7g2xlarge is a InstanceType enum value
+ InstanceTypeM7g2xlarge = "m7g.2xlarge"
+
+ // InstanceTypeM7g4xlarge is a InstanceType enum value
+ InstanceTypeM7g4xlarge = "m7g.4xlarge"
+
+ // InstanceTypeM7g8xlarge is a InstanceType enum value
+ InstanceTypeM7g8xlarge = "m7g.8xlarge"
+
+ // InstanceTypeM7g12xlarge is a InstanceType enum value
+ InstanceTypeM7g12xlarge = "m7g.12xlarge"
+
+ // InstanceTypeM7g16xlarge is a InstanceType enum value
+ InstanceTypeM7g16xlarge = "m7g.16xlarge"
+
+ // InstanceTypeM7gMetal is a InstanceType enum value
+ InstanceTypeM7gMetal = "m7g.metal"
+
+ // InstanceTypeR7gMedium is a InstanceType enum value
+ InstanceTypeR7gMedium = "r7g.medium"
+
+ // InstanceTypeR7gLarge is a InstanceType enum value
+ InstanceTypeR7gLarge = "r7g.large"
+
+ // InstanceTypeR7gXlarge is a InstanceType enum value
+ InstanceTypeR7gXlarge = "r7g.xlarge"
+
+ // InstanceTypeR7g2xlarge is a InstanceType enum value
+ InstanceTypeR7g2xlarge = "r7g.2xlarge"
+
+ // InstanceTypeR7g4xlarge is a InstanceType enum value
+ InstanceTypeR7g4xlarge = "r7g.4xlarge"
+
+ // InstanceTypeR7g8xlarge is a InstanceType enum value
+ InstanceTypeR7g8xlarge = "r7g.8xlarge"
+
+ // InstanceTypeR7g12xlarge is a InstanceType enum value
+ InstanceTypeR7g12xlarge = "r7g.12xlarge"
+
+ // InstanceTypeR7g16xlarge is a InstanceType enum value
+ InstanceTypeR7g16xlarge = "r7g.16xlarge"
+
+ // InstanceTypeR7gMetal is a InstanceType enum value
+ InstanceTypeR7gMetal = "r7g.metal"
+
+ // InstanceTypeC6inMetal is a InstanceType enum value
+ InstanceTypeC6inMetal = "c6in.metal"
+
+ // InstanceTypeM6inMetal is a InstanceType enum value
+ InstanceTypeM6inMetal = "m6in.metal"
+
+ // InstanceTypeM6idnMetal is a InstanceType enum value
+ InstanceTypeM6idnMetal = "m6idn.metal"
+
+ // InstanceTypeR6inMetal is a InstanceType enum value
+ InstanceTypeR6inMetal = "r6in.metal"
+
+ // InstanceTypeR6idnMetal is a InstanceType enum value
+ InstanceTypeR6idnMetal = "r6idn.metal"
)
// InstanceType_Values returns all elements of the InstanceType enum
@@ -173660,6 +186178,76 @@ func InstanceType_Values() []string {
InstanceTypeU24tb1112xlarge,
InstanceTypeTrn12xlarge,
InstanceTypeTrn132xlarge,
+ InstanceTypeHpc6id32xlarge,
+ InstanceTypeC6inLarge,
+ InstanceTypeC6inXlarge,
+ InstanceTypeC6in2xlarge,
+ InstanceTypeC6in4xlarge,
+ InstanceTypeC6in8xlarge,
+ InstanceTypeC6in12xlarge,
+ InstanceTypeC6in16xlarge,
+ InstanceTypeC6in24xlarge,
+ InstanceTypeC6in32xlarge,
+ InstanceTypeM6inLarge,
+ InstanceTypeM6inXlarge,
+ InstanceTypeM6in2xlarge,
+ InstanceTypeM6in4xlarge,
+ InstanceTypeM6in8xlarge,
+ InstanceTypeM6in12xlarge,
+ InstanceTypeM6in16xlarge,
+ InstanceTypeM6in24xlarge,
+ InstanceTypeM6in32xlarge,
+ InstanceTypeM6idnLarge,
+ InstanceTypeM6idnXlarge,
+ InstanceTypeM6idn2xlarge,
+ InstanceTypeM6idn4xlarge,
+ InstanceTypeM6idn8xlarge,
+ InstanceTypeM6idn12xlarge,
+ InstanceTypeM6idn16xlarge,
+ InstanceTypeM6idn24xlarge,
+ InstanceTypeM6idn32xlarge,
+ InstanceTypeR6inLarge,
+ InstanceTypeR6inXlarge,
+ InstanceTypeR6in2xlarge,
+ InstanceTypeR6in4xlarge,
+ InstanceTypeR6in8xlarge,
+ InstanceTypeR6in12xlarge,
+ InstanceTypeR6in16xlarge,
+ InstanceTypeR6in24xlarge,
+ InstanceTypeR6in32xlarge,
+ InstanceTypeR6idnLarge,
+ InstanceTypeR6idnXlarge,
+ InstanceTypeR6idn2xlarge,
+ InstanceTypeR6idn4xlarge,
+ InstanceTypeR6idn8xlarge,
+ InstanceTypeR6idn12xlarge,
+ InstanceTypeR6idn16xlarge,
+ InstanceTypeR6idn24xlarge,
+ InstanceTypeR6idn32xlarge,
+ InstanceTypeC7gMetal,
+ InstanceTypeM7gMedium,
+ InstanceTypeM7gLarge,
+ InstanceTypeM7gXlarge,
+ InstanceTypeM7g2xlarge,
+ InstanceTypeM7g4xlarge,
+ InstanceTypeM7g8xlarge,
+ InstanceTypeM7g12xlarge,
+ InstanceTypeM7g16xlarge,
+ InstanceTypeM7gMetal,
+ InstanceTypeR7gMedium,
+ InstanceTypeR7gLarge,
+ InstanceTypeR7gXlarge,
+ InstanceTypeR7g2xlarge,
+ InstanceTypeR7g4xlarge,
+ InstanceTypeR7g8xlarge,
+ InstanceTypeR7g12xlarge,
+ InstanceTypeR7g16xlarge,
+ InstanceTypeR7gMetal,
+ InstanceTypeC6inMetal,
+ InstanceTypeM6inMetal,
+ InstanceTypeM6idnMetal,
+ InstanceTypeR6inMetal,
+ InstanceTypeR6idnMetal,
}
}
@@ -173759,6 +186347,22 @@ func IpamAddressHistoryResourceType_Values() []string {
}
}
+const (
+ // IpamAssociatedResourceDiscoveryStatusActive is a IpamAssociatedResourceDiscoveryStatus enum value
+ IpamAssociatedResourceDiscoveryStatusActive = "active"
+
+ // IpamAssociatedResourceDiscoveryStatusNotFound is a IpamAssociatedResourceDiscoveryStatus enum value
+ IpamAssociatedResourceDiscoveryStatusNotFound = "not-found"
+)
+
+// IpamAssociatedResourceDiscoveryStatus_Values returns all elements of the IpamAssociatedResourceDiscoveryStatus enum
+func IpamAssociatedResourceDiscoveryStatus_Values() []string {
+ return []string{
+ IpamAssociatedResourceDiscoveryStatusActive,
+ IpamAssociatedResourceDiscoveryStatusNotFound,
+ }
+}
+
const (
// IpamComplianceStatusCompliant is a IpamComplianceStatus enum value
IpamComplianceStatusCompliant = "compliant"
@@ -173783,6 +186387,26 @@ func IpamComplianceStatus_Values() []string {
}
}
+const (
+ // IpamDiscoveryFailureCodeAssumeRoleFailure is a IpamDiscoveryFailureCode enum value
+ IpamDiscoveryFailureCodeAssumeRoleFailure = "assume-role-failure"
+
+ // IpamDiscoveryFailureCodeThrottlingFailure is a IpamDiscoveryFailureCode enum value
+ IpamDiscoveryFailureCodeThrottlingFailure = "throttling-failure"
+
+ // IpamDiscoveryFailureCodeUnauthorizedFailure is a IpamDiscoveryFailureCode enum value
+ IpamDiscoveryFailureCodeUnauthorizedFailure = "unauthorized-failure"
+)
+
+// IpamDiscoveryFailureCode_Values returns all elements of the IpamDiscoveryFailureCode enum
+func IpamDiscoveryFailureCode_Values() []string {
+ return []string{
+ IpamDiscoveryFailureCodeAssumeRoleFailure,
+ IpamDiscoveryFailureCodeThrottlingFailure,
+ IpamDiscoveryFailureCodeUnauthorizedFailure,
+ }
+}
+
const (
// IpamManagementStateManaged is a IpamManagementState enum value
IpamManagementStateManaged = "managed"
@@ -173862,12 +186486,16 @@ func IpamPoolAwsService_Values() []string {
const (
// IpamPoolCidrFailureCodeCidrNotAvailable is a IpamPoolCidrFailureCode enum value
IpamPoolCidrFailureCodeCidrNotAvailable = "cidr-not-available"
+
+ // IpamPoolCidrFailureCodeLimitExceeded is a IpamPoolCidrFailureCode enum value
+ IpamPoolCidrFailureCodeLimitExceeded = "limit-exceeded"
)
// IpamPoolCidrFailureCode_Values returns all elements of the IpamPoolCidrFailureCode enum
func IpamPoolCidrFailureCode_Values() []string {
return []string{
IpamPoolCidrFailureCodeCidrNotAvailable,
+ IpamPoolCidrFailureCodeLimitExceeded,
}
}
@@ -173911,6 +186539,22 @@ func IpamPoolCidrState_Values() []string {
}
}
+const (
+ // IpamPoolPublicIpSourceAmazon is a IpamPoolPublicIpSource enum value
+ IpamPoolPublicIpSourceAmazon = "amazon"
+
+ // IpamPoolPublicIpSourceByoip is a IpamPoolPublicIpSource enum value
+ IpamPoolPublicIpSourceByoip = "byoip"
+)
+
+// IpamPoolPublicIpSource_Values returns all elements of the IpamPoolPublicIpSource enum
+func IpamPoolPublicIpSource_Values() []string {
+ return []string{
+ IpamPoolPublicIpSourceAmazon,
+ IpamPoolPublicIpSourceByoip,
+ }
+}
+
const (
// IpamPoolStateCreateInProgress is a IpamPoolState enum value
IpamPoolStateCreateInProgress = "create-in-progress"
@@ -173967,6 +186611,106 @@ func IpamPoolState_Values() []string {
}
}
+const (
+ // IpamResourceDiscoveryAssociationStateAssociateInProgress is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateAssociateInProgress = "associate-in-progress"
+
+ // IpamResourceDiscoveryAssociationStateAssociateComplete is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateAssociateComplete = "associate-complete"
+
+ // IpamResourceDiscoveryAssociationStateAssociateFailed is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateAssociateFailed = "associate-failed"
+
+ // IpamResourceDiscoveryAssociationStateDisassociateInProgress is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateDisassociateInProgress = "disassociate-in-progress"
+
+ // IpamResourceDiscoveryAssociationStateDisassociateComplete is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateDisassociateComplete = "disassociate-complete"
+
+ // IpamResourceDiscoveryAssociationStateDisassociateFailed is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateDisassociateFailed = "disassociate-failed"
+
+ // IpamResourceDiscoveryAssociationStateIsolateInProgress is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateIsolateInProgress = "isolate-in-progress"
+
+ // IpamResourceDiscoveryAssociationStateIsolateComplete is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateIsolateComplete = "isolate-complete"
+
+ // IpamResourceDiscoveryAssociationStateRestoreInProgress is a IpamResourceDiscoveryAssociationState enum value
+ IpamResourceDiscoveryAssociationStateRestoreInProgress = "restore-in-progress"
+)
+
+// IpamResourceDiscoveryAssociationState_Values returns all elements of the IpamResourceDiscoveryAssociationState enum
+func IpamResourceDiscoveryAssociationState_Values() []string {
+ return []string{
+ IpamResourceDiscoveryAssociationStateAssociateInProgress,
+ IpamResourceDiscoveryAssociationStateAssociateComplete,
+ IpamResourceDiscoveryAssociationStateAssociateFailed,
+ IpamResourceDiscoveryAssociationStateDisassociateInProgress,
+ IpamResourceDiscoveryAssociationStateDisassociateComplete,
+ IpamResourceDiscoveryAssociationStateDisassociateFailed,
+ IpamResourceDiscoveryAssociationStateIsolateInProgress,
+ IpamResourceDiscoveryAssociationStateIsolateComplete,
+ IpamResourceDiscoveryAssociationStateRestoreInProgress,
+ }
+}
+
+const (
+ // IpamResourceDiscoveryStateCreateInProgress is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateCreateInProgress = "create-in-progress"
+
+ // IpamResourceDiscoveryStateCreateComplete is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateCreateComplete = "create-complete"
+
+ // IpamResourceDiscoveryStateCreateFailed is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateCreateFailed = "create-failed"
+
+ // IpamResourceDiscoveryStateModifyInProgress is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateModifyInProgress = "modify-in-progress"
+
+ // IpamResourceDiscoveryStateModifyComplete is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateModifyComplete = "modify-complete"
+
+ // IpamResourceDiscoveryStateModifyFailed is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateModifyFailed = "modify-failed"
+
+ // IpamResourceDiscoveryStateDeleteInProgress is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateDeleteInProgress = "delete-in-progress"
+
+ // IpamResourceDiscoveryStateDeleteComplete is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateDeleteComplete = "delete-complete"
+
+ // IpamResourceDiscoveryStateDeleteFailed is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateDeleteFailed = "delete-failed"
+
+ // IpamResourceDiscoveryStateIsolateInProgress is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateIsolateInProgress = "isolate-in-progress"
+
+ // IpamResourceDiscoveryStateIsolateComplete is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateIsolateComplete = "isolate-complete"
+
+ // IpamResourceDiscoveryStateRestoreInProgress is a IpamResourceDiscoveryState enum value
+ IpamResourceDiscoveryStateRestoreInProgress = "restore-in-progress"
+)
+
+// IpamResourceDiscoveryState_Values returns all elements of the IpamResourceDiscoveryState enum
+func IpamResourceDiscoveryState_Values() []string {
+ return []string{
+ IpamResourceDiscoveryStateCreateInProgress,
+ IpamResourceDiscoveryStateCreateComplete,
+ IpamResourceDiscoveryStateCreateFailed,
+ IpamResourceDiscoveryStateModifyInProgress,
+ IpamResourceDiscoveryStateModifyComplete,
+ IpamResourceDiscoveryStateModifyFailed,
+ IpamResourceDiscoveryStateDeleteInProgress,
+ IpamResourceDiscoveryStateDeleteComplete,
+ IpamResourceDiscoveryStateDeleteFailed,
+ IpamResourceDiscoveryStateIsolateInProgress,
+ IpamResourceDiscoveryStateIsolateComplete,
+ IpamResourceDiscoveryStateRestoreInProgress,
+ }
+}
+
const (
// IpamResourceTypeVpc is a IpamResourceType enum value
IpamResourceTypeVpc = "vpc"
@@ -174595,6 +187339,38 @@ func MulticastSupportValue_Values() []string {
}
}
+const (
+ // NatGatewayAddressStatusAssigning is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusAssigning = "assigning"
+
+ // NatGatewayAddressStatusUnassigning is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusUnassigning = "unassigning"
+
+ // NatGatewayAddressStatusAssociating is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusAssociating = "associating"
+
+ // NatGatewayAddressStatusDisassociating is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusDisassociating = "disassociating"
+
+ // NatGatewayAddressStatusSucceeded is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusSucceeded = "succeeded"
+
+ // NatGatewayAddressStatusFailed is a NatGatewayAddressStatus enum value
+ NatGatewayAddressStatusFailed = "failed"
+)
+
+// NatGatewayAddressStatus_Values returns all elements of the NatGatewayAddressStatus enum
+func NatGatewayAddressStatus_Values() []string {
+ return []string{
+ NatGatewayAddressStatusAssigning,
+ NatGatewayAddressStatusUnassigning,
+ NatGatewayAddressStatusAssociating,
+ NatGatewayAddressStatusDisassociating,
+ NatGatewayAddressStatusSucceeded,
+ NatGatewayAddressStatusFailed,
+ }
+}
+
const (
// NatGatewayStatePending is a NatGatewayState enum value
NatGatewayStatePending = "pending"
@@ -175636,8 +188412,32 @@ const (
// ResourceTypeVpcEndpointConnectionDeviceType is a ResourceType enum value
ResourceTypeVpcEndpointConnectionDeviceType = "vpc-endpoint-connection-device-type"
+ // ResourceTypeVerifiedAccessInstance is a ResourceType enum value
+ ResourceTypeVerifiedAccessInstance = "verified-access-instance"
+
+ // ResourceTypeVerifiedAccessGroup is a ResourceType enum value
+ ResourceTypeVerifiedAccessGroup = "verified-access-group"
+
+ // ResourceTypeVerifiedAccessEndpoint is a ResourceType enum value
+ ResourceTypeVerifiedAccessEndpoint = "verified-access-endpoint"
+
+ // ResourceTypeVerifiedAccessPolicy is a ResourceType enum value
+ ResourceTypeVerifiedAccessPolicy = "verified-access-policy"
+
+ // ResourceTypeVerifiedAccessTrustProvider is a ResourceType enum value
+ ResourceTypeVerifiedAccessTrustProvider = "verified-access-trust-provider"
+
// ResourceTypeVpnConnectionDeviceType is a ResourceType enum value
ResourceTypeVpnConnectionDeviceType = "vpn-connection-device-type"
+
+ // ResourceTypeVpcBlockPublicAccessExclusion is a ResourceType enum value
+ ResourceTypeVpcBlockPublicAccessExclusion = "vpc-block-public-access-exclusion"
+
+ // ResourceTypeIpamResourceDiscovery is a ResourceType enum value
+ ResourceTypeIpamResourceDiscovery = "ipam-resource-discovery"
+
+ // ResourceTypeIpamResourceDiscoveryAssociation is a ResourceType enum value
+ ResourceTypeIpamResourceDiscoveryAssociation = "ipam-resource-discovery-association"
)
// ResourceType_Values returns all elements of the ResourceType enum
@@ -175719,7 +188519,15 @@ func ResourceType_Values() []string {
ResourceTypeCapacityReservationFleet,
ResourceTypeTrafficMirrorFilterRule,
ResourceTypeVpcEndpointConnectionDeviceType,
+ ResourceTypeVerifiedAccessInstance,
+ ResourceTypeVerifiedAccessGroup,
+ ResourceTypeVerifiedAccessEndpoint,
+ ResourceTypeVerifiedAccessPolicy,
+ ResourceTypeVerifiedAccessTrustProvider,
ResourceTypeVpnConnectionDeviceType,
+ ResourceTypeVpcBlockPublicAccessExclusion,
+ ResourceTypeIpamResourceDiscovery,
+ ResourceTypeIpamResourceDiscoveryAssociation,
}
}
@@ -176999,6 +189807,22 @@ func TransportProtocol_Values() []string {
}
}
+const (
+ // TrustProviderTypeUser is a TrustProviderType enum value
+ TrustProviderTypeUser = "user"
+
+ // TrustProviderTypeDevice is a TrustProviderType enum value
+ TrustProviderTypeDevice = "device"
+)
+
+// TrustProviderType_Values returns all elements of the TrustProviderType enum
+func TrustProviderType_Values() []string {
+ return []string{
+ TrustProviderTypeUser,
+ TrustProviderTypeDevice,
+ }
+}
+
const (
// TunnelInsideIpVersionIpv4 is a TunnelInsideIpVersion enum value
TunnelInsideIpVersionIpv4 = "ipv4"
@@ -177079,6 +189903,110 @@ func UsageClassType_Values() []string {
}
}
+const (
+ // UserTrustProviderTypeIamIdentityCenter is a UserTrustProviderType enum value
+ UserTrustProviderTypeIamIdentityCenter = "iam-identity-center"
+
+ // UserTrustProviderTypeOidc is a UserTrustProviderType enum value
+ UserTrustProviderTypeOidc = "oidc"
+)
+
+// UserTrustProviderType_Values returns all elements of the UserTrustProviderType enum
+func UserTrustProviderType_Values() []string {
+ return []string{
+ UserTrustProviderTypeIamIdentityCenter,
+ UserTrustProviderTypeOidc,
+ }
+}
+
+const (
+ // VerifiedAccessEndpointAttachmentTypeVpc is a VerifiedAccessEndpointAttachmentType enum value
+ VerifiedAccessEndpointAttachmentTypeVpc = "vpc"
+)
+
+// VerifiedAccessEndpointAttachmentType_Values returns all elements of the VerifiedAccessEndpointAttachmentType enum
+func VerifiedAccessEndpointAttachmentType_Values() []string {
+ return []string{
+ VerifiedAccessEndpointAttachmentTypeVpc,
+ }
+}
+
+const (
+ // VerifiedAccessEndpointProtocolHttp is a VerifiedAccessEndpointProtocol enum value
+ VerifiedAccessEndpointProtocolHttp = "http"
+
+ // VerifiedAccessEndpointProtocolHttps is a VerifiedAccessEndpointProtocol enum value
+ VerifiedAccessEndpointProtocolHttps = "https"
+)
+
+// VerifiedAccessEndpointProtocol_Values returns all elements of the VerifiedAccessEndpointProtocol enum
+func VerifiedAccessEndpointProtocol_Values() []string {
+ return []string{
+ VerifiedAccessEndpointProtocolHttp,
+ VerifiedAccessEndpointProtocolHttps,
+ }
+}
+
+const (
+ // VerifiedAccessEndpointStatusCodePending is a VerifiedAccessEndpointStatusCode enum value
+ VerifiedAccessEndpointStatusCodePending = "pending"
+
+ // VerifiedAccessEndpointStatusCodeActive is a VerifiedAccessEndpointStatusCode enum value
+ VerifiedAccessEndpointStatusCodeActive = "active"
+
+ // VerifiedAccessEndpointStatusCodeUpdating is a VerifiedAccessEndpointStatusCode enum value
+ VerifiedAccessEndpointStatusCodeUpdating = "updating"
+
+ // VerifiedAccessEndpointStatusCodeDeleting is a VerifiedAccessEndpointStatusCode enum value
+ VerifiedAccessEndpointStatusCodeDeleting = "deleting"
+
+ // VerifiedAccessEndpointStatusCodeDeleted is a VerifiedAccessEndpointStatusCode enum value
+ VerifiedAccessEndpointStatusCodeDeleted = "deleted"
+)
+
+// VerifiedAccessEndpointStatusCode_Values returns all elements of the VerifiedAccessEndpointStatusCode enum
+func VerifiedAccessEndpointStatusCode_Values() []string {
+ return []string{
+ VerifiedAccessEndpointStatusCodePending,
+ VerifiedAccessEndpointStatusCodeActive,
+ VerifiedAccessEndpointStatusCodeUpdating,
+ VerifiedAccessEndpointStatusCodeDeleting,
+ VerifiedAccessEndpointStatusCodeDeleted,
+ }
+}
+
+const (
+ // VerifiedAccessEndpointTypeLoadBalancer is a VerifiedAccessEndpointType enum value
+ VerifiedAccessEndpointTypeLoadBalancer = "load-balancer"
+
+ // VerifiedAccessEndpointTypeNetworkInterface is a VerifiedAccessEndpointType enum value
+ VerifiedAccessEndpointTypeNetworkInterface = "network-interface"
+)
+
+// VerifiedAccessEndpointType_Values returns all elements of the VerifiedAccessEndpointType enum
+func VerifiedAccessEndpointType_Values() []string {
+ return []string{
+ VerifiedAccessEndpointTypeLoadBalancer,
+ VerifiedAccessEndpointTypeNetworkInterface,
+ }
+}
+
+const (
+ // VerifiedAccessLogDeliveryStatusCodeSuccess is a VerifiedAccessLogDeliveryStatusCode enum value
+ VerifiedAccessLogDeliveryStatusCodeSuccess = "success"
+
+ // VerifiedAccessLogDeliveryStatusCodeFailed is a VerifiedAccessLogDeliveryStatusCode enum value
+ VerifiedAccessLogDeliveryStatusCodeFailed = "failed"
+)
+
+// VerifiedAccessLogDeliveryStatusCode_Values returns all elements of the VerifiedAccessLogDeliveryStatusCode enum
+func VerifiedAccessLogDeliveryStatusCode_Values() []string {
+ return []string{
+ VerifiedAccessLogDeliveryStatusCodeSuccess,
+ VerifiedAccessLogDeliveryStatusCodeFailed,
+ }
+}
+
const (
// VirtualizationTypeHvm is a VirtualizationType enum value
VirtualizationTypeHvm = "hvm"
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
index d563a9fb230f..770e43bd81c3 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go
@@ -16,17 +16,17 @@
//
// To learn more, see the following resources:
//
-// - Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon
-// EC2 documentation (http://aws.amazon.com/documentation/ec2)
+// - Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon
+// EC2 documentation (https://docs.aws.amazon.com/ec2/index.html)
//
// - Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
-// EBS documentation (http://aws.amazon.com/documentation/ebs)
+// EBS documentation (https://docs.aws.amazon.com/ebs/index.html)
//
// - Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
-// VPC documentation (http://aws.amazon.com/documentation/vpc)
+// VPC documentation (https://docs.aws.amazon.com/vpc/index.html)
//
-// - Amazon Web Services VPN: Amazon Web Services VPN product page (http://aws.amazon.com/vpn),
-// Amazon Web Services VPN documentation (http://aws.amazon.com/documentation/vpn)
+// - VPN: VPN product page (http://aws.amazon.com/vpn), VPN documentation
+// (https://docs.aws.amazon.com/vpn/index.html)
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
//
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
new file mode 100644
index 000000000000..07465e34aaf3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
@@ -0,0 +1,2935 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ec2iface provides an interface to enable mocking the Amazon Elastic Compute Cloud service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ec2iface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/ec2"
+)
+
+// EC2API provides an interface to enable mocking the
+// ec2.EC2 service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon Elastic Compute Cloud.
+// func myFunc(svc ec2iface.EC2API) bool {
+// // Make svc.AcceptAddressTransfer request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := ec2.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockEC2Client struct {
+// ec2iface.EC2API
+// }
+// func (m *mockEC2Client) AcceptAddressTransfer(input *ec2.AcceptAddressTransferInput) (*ec2.AcceptAddressTransferOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockEC2Client{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
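+//
+// As a further illustration, here is a minimal, hypothetical sketch of the same
+// pattern for one of the context-aware variants declared below (the names
+// mockEC2ClientCtx and the canned empty output are assumptions for this sketch,
+// not part of the generated code):
+//
+//	// mockEC2ClientCtx stubs only the method the test needs; every other
+//	// EC2API method comes from the embedded (nil) interface and will panic
+//	// if called, which keeps unexpected SDK calls visible in tests.
+//	type mockEC2ClientCtx struct {
+//		ec2iface.EC2API
+//	}
+//
+//	// AcceptAddressTransferWithContext returns a canned response instead of
+//	// calling the EC2 API.
+//	func (m *mockEC2ClientCtx) AcceptAddressTransferWithContext(ctx aws.Context, in *ec2.AcceptAddressTransferInput, opts ...request.Option) (*ec2.AcceptAddressTransferOutput, error) {
+//		return &ec2.AcceptAddressTransferOutput{}, nil
+//	}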
+type EC2API interface {
+ AcceptAddressTransfer(*ec2.AcceptAddressTransferInput) (*ec2.AcceptAddressTransferOutput, error)
+ AcceptAddressTransferWithContext(aws.Context, *ec2.AcceptAddressTransferInput, ...request.Option) (*ec2.AcceptAddressTransferOutput, error)
+ AcceptAddressTransferRequest(*ec2.AcceptAddressTransferInput) (*request.Request, *ec2.AcceptAddressTransferOutput)
+
+ AcceptReservedInstancesExchangeQuote(*ec2.AcceptReservedInstancesExchangeQuoteInput) (*ec2.AcceptReservedInstancesExchangeQuoteOutput, error)
+ AcceptReservedInstancesExchangeQuoteWithContext(aws.Context, *ec2.AcceptReservedInstancesExchangeQuoteInput, ...request.Option) (*ec2.AcceptReservedInstancesExchangeQuoteOutput, error)
+ AcceptReservedInstancesExchangeQuoteRequest(*ec2.AcceptReservedInstancesExchangeQuoteInput) (*request.Request, *ec2.AcceptReservedInstancesExchangeQuoteOutput)
+
+ AcceptTransitGatewayMulticastDomainAssociations(*ec2.AcceptTransitGatewayMulticastDomainAssociationsInput) (*ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput, error)
+ AcceptTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.AcceptTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput, error)
+ AcceptTransitGatewayMulticastDomainAssociationsRequest(*ec2.AcceptTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.AcceptTransitGatewayMulticastDomainAssociationsOutput)
+
+ AcceptTransitGatewayPeeringAttachment(*ec2.AcceptTransitGatewayPeeringAttachmentInput) (*ec2.AcceptTransitGatewayPeeringAttachmentOutput, error)
+ AcceptTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.AcceptTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.AcceptTransitGatewayPeeringAttachmentOutput, error)
+ AcceptTransitGatewayPeeringAttachmentRequest(*ec2.AcceptTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.AcceptTransitGatewayPeeringAttachmentOutput)
+
+ AcceptTransitGatewayVpcAttachment(*ec2.AcceptTransitGatewayVpcAttachmentInput) (*ec2.AcceptTransitGatewayVpcAttachmentOutput, error)
+ AcceptTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.AcceptTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.AcceptTransitGatewayVpcAttachmentOutput, error)
+ AcceptTransitGatewayVpcAttachmentRequest(*ec2.AcceptTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.AcceptTransitGatewayVpcAttachmentOutput)
+
+ AcceptVpcEndpointConnections(*ec2.AcceptVpcEndpointConnectionsInput) (*ec2.AcceptVpcEndpointConnectionsOutput, error)
+ AcceptVpcEndpointConnectionsWithContext(aws.Context, *ec2.AcceptVpcEndpointConnectionsInput, ...request.Option) (*ec2.AcceptVpcEndpointConnectionsOutput, error)
+ AcceptVpcEndpointConnectionsRequest(*ec2.AcceptVpcEndpointConnectionsInput) (*request.Request, *ec2.AcceptVpcEndpointConnectionsOutput)
+
+ AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error)
+ AcceptVpcPeeringConnectionWithContext(aws.Context, *ec2.AcceptVpcPeeringConnectionInput, ...request.Option) (*ec2.AcceptVpcPeeringConnectionOutput, error)
+ AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput)
+
+ AdvertiseByoipCidr(*ec2.AdvertiseByoipCidrInput) (*ec2.AdvertiseByoipCidrOutput, error)
+ AdvertiseByoipCidrWithContext(aws.Context, *ec2.AdvertiseByoipCidrInput, ...request.Option) (*ec2.AdvertiseByoipCidrOutput, error)
+ AdvertiseByoipCidrRequest(*ec2.AdvertiseByoipCidrInput) (*request.Request, *ec2.AdvertiseByoipCidrOutput)
+
+ AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error)
+ AllocateAddressWithContext(aws.Context, *ec2.AllocateAddressInput, ...request.Option) (*ec2.AllocateAddressOutput, error)
+ AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput)
+
+ AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error)
+ AllocateHostsWithContext(aws.Context, *ec2.AllocateHostsInput, ...request.Option) (*ec2.AllocateHostsOutput, error)
+ AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput)
+
+ AllocateIpamPoolCidr(*ec2.AllocateIpamPoolCidrInput) (*ec2.AllocateIpamPoolCidrOutput, error)
+ AllocateIpamPoolCidrWithContext(aws.Context, *ec2.AllocateIpamPoolCidrInput, ...request.Option) (*ec2.AllocateIpamPoolCidrOutput, error)
+ AllocateIpamPoolCidrRequest(*ec2.AllocateIpamPoolCidrInput) (*request.Request, *ec2.AllocateIpamPoolCidrOutput)
+
+ ApplySecurityGroupsToClientVpnTargetNetwork(*ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput) (*ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput, error)
+ ApplySecurityGroupsToClientVpnTargetNetworkWithContext(aws.Context, *ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput, ...request.Option) (*ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput, error)
+ ApplySecurityGroupsToClientVpnTargetNetworkRequest(*ec2.ApplySecurityGroupsToClientVpnTargetNetworkInput) (*request.Request, *ec2.ApplySecurityGroupsToClientVpnTargetNetworkOutput)
+
+ AssignIpv6Addresses(*ec2.AssignIpv6AddressesInput) (*ec2.AssignIpv6AddressesOutput, error)
+ AssignIpv6AddressesWithContext(aws.Context, *ec2.AssignIpv6AddressesInput, ...request.Option) (*ec2.AssignIpv6AddressesOutput, error)
+ AssignIpv6AddressesRequest(*ec2.AssignIpv6AddressesInput) (*request.Request, *ec2.AssignIpv6AddressesOutput)
+
+ AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error)
+ AssignPrivateIpAddressesWithContext(aws.Context, *ec2.AssignPrivateIpAddressesInput, ...request.Option) (*ec2.AssignPrivateIpAddressesOutput, error)
+ AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput)
+
+ AssignPrivateNatGatewayAddress(*ec2.AssignPrivateNatGatewayAddressInput) (*ec2.AssignPrivateNatGatewayAddressOutput, error)
+ AssignPrivateNatGatewayAddressWithContext(aws.Context, *ec2.AssignPrivateNatGatewayAddressInput, ...request.Option) (*ec2.AssignPrivateNatGatewayAddressOutput, error)
+ AssignPrivateNatGatewayAddressRequest(*ec2.AssignPrivateNatGatewayAddressInput) (*request.Request, *ec2.AssignPrivateNatGatewayAddressOutput)
+
+ AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error)
+ AssociateAddressWithContext(aws.Context, *ec2.AssociateAddressInput, ...request.Option) (*ec2.AssociateAddressOutput, error)
+ AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput)
+
+ AssociateClientVpnTargetNetwork(*ec2.AssociateClientVpnTargetNetworkInput) (*ec2.AssociateClientVpnTargetNetworkOutput, error)
+ AssociateClientVpnTargetNetworkWithContext(aws.Context, *ec2.AssociateClientVpnTargetNetworkInput, ...request.Option) (*ec2.AssociateClientVpnTargetNetworkOutput, error)
+ AssociateClientVpnTargetNetworkRequest(*ec2.AssociateClientVpnTargetNetworkInput) (*request.Request, *ec2.AssociateClientVpnTargetNetworkOutput)
+
+ AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error)
+ AssociateDhcpOptionsWithContext(aws.Context, *ec2.AssociateDhcpOptionsInput, ...request.Option) (*ec2.AssociateDhcpOptionsOutput, error)
+ AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput)
+
+ AssociateEnclaveCertificateIamRole(*ec2.AssociateEnclaveCertificateIamRoleInput) (*ec2.AssociateEnclaveCertificateIamRoleOutput, error)
+ AssociateEnclaveCertificateIamRoleWithContext(aws.Context, *ec2.AssociateEnclaveCertificateIamRoleInput, ...request.Option) (*ec2.AssociateEnclaveCertificateIamRoleOutput, error)
+ AssociateEnclaveCertificateIamRoleRequest(*ec2.AssociateEnclaveCertificateIamRoleInput) (*request.Request, *ec2.AssociateEnclaveCertificateIamRoleOutput)
+
+ AssociateIamInstanceProfile(*ec2.AssociateIamInstanceProfileInput) (*ec2.AssociateIamInstanceProfileOutput, error)
+ AssociateIamInstanceProfileWithContext(aws.Context, *ec2.AssociateIamInstanceProfileInput, ...request.Option) (*ec2.AssociateIamInstanceProfileOutput, error)
+ AssociateIamInstanceProfileRequest(*ec2.AssociateIamInstanceProfileInput) (*request.Request, *ec2.AssociateIamInstanceProfileOutput)
+
+ AssociateInstanceEventWindow(*ec2.AssociateInstanceEventWindowInput) (*ec2.AssociateInstanceEventWindowOutput, error)
+ AssociateInstanceEventWindowWithContext(aws.Context, *ec2.AssociateInstanceEventWindowInput, ...request.Option) (*ec2.AssociateInstanceEventWindowOutput, error)
+ AssociateInstanceEventWindowRequest(*ec2.AssociateInstanceEventWindowInput) (*request.Request, *ec2.AssociateInstanceEventWindowOutput)
+
+ AssociateIpamResourceDiscovery(*ec2.AssociateIpamResourceDiscoveryInput) (*ec2.AssociateIpamResourceDiscoveryOutput, error)
+ AssociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.AssociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.AssociateIpamResourceDiscoveryOutput, error)
+ AssociateIpamResourceDiscoveryRequest(*ec2.AssociateIpamResourceDiscoveryInput) (*request.Request, *ec2.AssociateIpamResourceDiscoveryOutput)
+
+ AssociateNatGatewayAddress(*ec2.AssociateNatGatewayAddressInput) (*ec2.AssociateNatGatewayAddressOutput, error)
+ AssociateNatGatewayAddressWithContext(aws.Context, *ec2.AssociateNatGatewayAddressInput, ...request.Option) (*ec2.AssociateNatGatewayAddressOutput, error)
+ AssociateNatGatewayAddressRequest(*ec2.AssociateNatGatewayAddressInput) (*request.Request, *ec2.AssociateNatGatewayAddressOutput)
+
+ AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error)
+ AssociateRouteTableWithContext(aws.Context, *ec2.AssociateRouteTableInput, ...request.Option) (*ec2.AssociateRouteTableOutput, error)
+ AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput)
+
+ AssociateSubnetCidrBlock(*ec2.AssociateSubnetCidrBlockInput) (*ec2.AssociateSubnetCidrBlockOutput, error)
+ AssociateSubnetCidrBlockWithContext(aws.Context, *ec2.AssociateSubnetCidrBlockInput, ...request.Option) (*ec2.AssociateSubnetCidrBlockOutput, error)
+ AssociateSubnetCidrBlockRequest(*ec2.AssociateSubnetCidrBlockInput) (*request.Request, *ec2.AssociateSubnetCidrBlockOutput)
+
+ AssociateTransitGatewayMulticastDomain(*ec2.AssociateTransitGatewayMulticastDomainInput) (*ec2.AssociateTransitGatewayMulticastDomainOutput, error)
+ AssociateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.AssociateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.AssociateTransitGatewayMulticastDomainOutput, error)
+ AssociateTransitGatewayMulticastDomainRequest(*ec2.AssociateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.AssociateTransitGatewayMulticastDomainOutput)
+
+ AssociateTransitGatewayPolicyTable(*ec2.AssociateTransitGatewayPolicyTableInput) (*ec2.AssociateTransitGatewayPolicyTableOutput, error)
+ AssociateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.AssociateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.AssociateTransitGatewayPolicyTableOutput, error)
+ AssociateTransitGatewayPolicyTableRequest(*ec2.AssociateTransitGatewayPolicyTableInput) (*request.Request, *ec2.AssociateTransitGatewayPolicyTableOutput)
+
+ AssociateTransitGatewayRouteTable(*ec2.AssociateTransitGatewayRouteTableInput) (*ec2.AssociateTransitGatewayRouteTableOutput, error)
+ AssociateTransitGatewayRouteTableWithContext(aws.Context, *ec2.AssociateTransitGatewayRouteTableInput, ...request.Option) (*ec2.AssociateTransitGatewayRouteTableOutput, error)
+ AssociateTransitGatewayRouteTableRequest(*ec2.AssociateTransitGatewayRouteTableInput) (*request.Request, *ec2.AssociateTransitGatewayRouteTableOutput)
+
+ AssociateTrunkInterface(*ec2.AssociateTrunkInterfaceInput) (*ec2.AssociateTrunkInterfaceOutput, error)
+ AssociateTrunkInterfaceWithContext(aws.Context, *ec2.AssociateTrunkInterfaceInput, ...request.Option) (*ec2.AssociateTrunkInterfaceOutput, error)
+ AssociateTrunkInterfaceRequest(*ec2.AssociateTrunkInterfaceInput) (*request.Request, *ec2.AssociateTrunkInterfaceOutput)
+
+ AssociateVpcCidrBlock(*ec2.AssociateVpcCidrBlockInput) (*ec2.AssociateVpcCidrBlockOutput, error)
+ AssociateVpcCidrBlockWithContext(aws.Context, *ec2.AssociateVpcCidrBlockInput, ...request.Option) (*ec2.AssociateVpcCidrBlockOutput, error)
+ AssociateVpcCidrBlockRequest(*ec2.AssociateVpcCidrBlockInput) (*request.Request, *ec2.AssociateVpcCidrBlockOutput)
+
+ AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error)
+ AttachClassicLinkVpcWithContext(aws.Context, *ec2.AttachClassicLinkVpcInput, ...request.Option) (*ec2.AttachClassicLinkVpcOutput, error)
+ AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput)
+
+ AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error)
+ AttachInternetGatewayWithContext(aws.Context, *ec2.AttachInternetGatewayInput, ...request.Option) (*ec2.AttachInternetGatewayOutput, error)
+ AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput)
+
+ AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error)
+ AttachNetworkInterfaceWithContext(aws.Context, *ec2.AttachNetworkInterfaceInput, ...request.Option) (*ec2.AttachNetworkInterfaceOutput, error)
+ AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput)
+
+ AttachVerifiedAccessTrustProvider(*ec2.AttachVerifiedAccessTrustProviderInput) (*ec2.AttachVerifiedAccessTrustProviderOutput, error)
+ AttachVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.AttachVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.AttachVerifiedAccessTrustProviderOutput, error)
+ AttachVerifiedAccessTrustProviderRequest(*ec2.AttachVerifiedAccessTrustProviderInput) (*request.Request, *ec2.AttachVerifiedAccessTrustProviderOutput)
+
+ AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
+ AttachVolumeWithContext(aws.Context, *ec2.AttachVolumeInput, ...request.Option) (*ec2.VolumeAttachment, error)
+ AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
+
+ AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error)
+ AttachVpnGatewayWithContext(aws.Context, *ec2.AttachVpnGatewayInput, ...request.Option) (*ec2.AttachVpnGatewayOutput, error)
+ AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput)
+
+ AuthorizeClientVpnIngress(*ec2.AuthorizeClientVpnIngressInput) (*ec2.AuthorizeClientVpnIngressOutput, error)
+ AuthorizeClientVpnIngressWithContext(aws.Context, *ec2.AuthorizeClientVpnIngressInput, ...request.Option) (*ec2.AuthorizeClientVpnIngressOutput, error)
+ AuthorizeClientVpnIngressRequest(*ec2.AuthorizeClientVpnIngressInput) (*request.Request, *ec2.AuthorizeClientVpnIngressOutput)
+
+ AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error)
+ AuthorizeSecurityGroupEgressWithContext(aws.Context, *ec2.AuthorizeSecurityGroupEgressInput, ...request.Option) (*ec2.AuthorizeSecurityGroupEgressOutput, error)
+ AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput)
+
+ AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
+ AuthorizeSecurityGroupIngressWithContext(aws.Context, *ec2.AuthorizeSecurityGroupIngressInput, ...request.Option) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
+ AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput)
+
+ BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error)
+ BundleInstanceWithContext(aws.Context, *ec2.BundleInstanceInput, ...request.Option) (*ec2.BundleInstanceOutput, error)
+ BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput)
+
+ CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error)
+ CancelBundleTaskWithContext(aws.Context, *ec2.CancelBundleTaskInput, ...request.Option) (*ec2.CancelBundleTaskOutput, error)
+ CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput)
+
+ CancelCapacityReservation(*ec2.CancelCapacityReservationInput) (*ec2.CancelCapacityReservationOutput, error)
+ CancelCapacityReservationWithContext(aws.Context, *ec2.CancelCapacityReservationInput, ...request.Option) (*ec2.CancelCapacityReservationOutput, error)
+ CancelCapacityReservationRequest(*ec2.CancelCapacityReservationInput) (*request.Request, *ec2.CancelCapacityReservationOutput)
+
+ CancelCapacityReservationFleets(*ec2.CancelCapacityReservationFleetsInput) (*ec2.CancelCapacityReservationFleetsOutput, error)
+ CancelCapacityReservationFleetsWithContext(aws.Context, *ec2.CancelCapacityReservationFleetsInput, ...request.Option) (*ec2.CancelCapacityReservationFleetsOutput, error)
+ CancelCapacityReservationFleetsRequest(*ec2.CancelCapacityReservationFleetsInput) (*request.Request, *ec2.CancelCapacityReservationFleetsOutput)
+
+ CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error)
+ CancelConversionTaskWithContext(aws.Context, *ec2.CancelConversionTaskInput, ...request.Option) (*ec2.CancelConversionTaskOutput, error)
+ CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput)
+
+ CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error)
+ CancelExportTaskWithContext(aws.Context, *ec2.CancelExportTaskInput, ...request.Option) (*ec2.CancelExportTaskOutput, error)
+ CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput)
+
+ CancelImageLaunchPermission(*ec2.CancelImageLaunchPermissionInput) (*ec2.CancelImageLaunchPermissionOutput, error)
+ CancelImageLaunchPermissionWithContext(aws.Context, *ec2.CancelImageLaunchPermissionInput, ...request.Option) (*ec2.CancelImageLaunchPermissionOutput, error)
+ CancelImageLaunchPermissionRequest(*ec2.CancelImageLaunchPermissionInput) (*request.Request, *ec2.CancelImageLaunchPermissionOutput)
+
+ CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error)
+ CancelImportTaskWithContext(aws.Context, *ec2.CancelImportTaskInput, ...request.Option) (*ec2.CancelImportTaskOutput, error)
+ CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput)
+
+ CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error)
+ CancelReservedInstancesListingWithContext(aws.Context, *ec2.CancelReservedInstancesListingInput, ...request.Option) (*ec2.CancelReservedInstancesListingOutput, error)
+ CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput)
+
+ CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error)
+ CancelSpotFleetRequestsWithContext(aws.Context, *ec2.CancelSpotFleetRequestsInput, ...request.Option) (*ec2.CancelSpotFleetRequestsOutput, error)
+ CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput)
+
+ CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error)
+ CancelSpotInstanceRequestsWithContext(aws.Context, *ec2.CancelSpotInstanceRequestsInput, ...request.Option) (*ec2.CancelSpotInstanceRequestsOutput, error)
+ CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput)
+
+ ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error)
+ ConfirmProductInstanceWithContext(aws.Context, *ec2.ConfirmProductInstanceInput, ...request.Option) (*ec2.ConfirmProductInstanceOutput, error)
+ ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput)
+
+ CopyFpgaImage(*ec2.CopyFpgaImageInput) (*ec2.CopyFpgaImageOutput, error)
+ CopyFpgaImageWithContext(aws.Context, *ec2.CopyFpgaImageInput, ...request.Option) (*ec2.CopyFpgaImageOutput, error)
+ CopyFpgaImageRequest(*ec2.CopyFpgaImageInput) (*request.Request, *ec2.CopyFpgaImageOutput)
+
+ CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error)
+ CopyImageWithContext(aws.Context, *ec2.CopyImageInput, ...request.Option) (*ec2.CopyImageOutput, error)
+ CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput)
+
+ CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error)
+ CopySnapshotWithContext(aws.Context, *ec2.CopySnapshotInput, ...request.Option) (*ec2.CopySnapshotOutput, error)
+ CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput)
+
+ CreateCapacityReservation(*ec2.CreateCapacityReservationInput) (*ec2.CreateCapacityReservationOutput, error)
+ CreateCapacityReservationWithContext(aws.Context, *ec2.CreateCapacityReservationInput, ...request.Option) (*ec2.CreateCapacityReservationOutput, error)
+ CreateCapacityReservationRequest(*ec2.CreateCapacityReservationInput) (*request.Request, *ec2.CreateCapacityReservationOutput)
+
+ CreateCapacityReservationFleet(*ec2.CreateCapacityReservationFleetInput) (*ec2.CreateCapacityReservationFleetOutput, error)
+ CreateCapacityReservationFleetWithContext(aws.Context, *ec2.CreateCapacityReservationFleetInput, ...request.Option) (*ec2.CreateCapacityReservationFleetOutput, error)
+ CreateCapacityReservationFleetRequest(*ec2.CreateCapacityReservationFleetInput) (*request.Request, *ec2.CreateCapacityReservationFleetOutput)
+
+ CreateCarrierGateway(*ec2.CreateCarrierGatewayInput) (*ec2.CreateCarrierGatewayOutput, error)
+ CreateCarrierGatewayWithContext(aws.Context, *ec2.CreateCarrierGatewayInput, ...request.Option) (*ec2.CreateCarrierGatewayOutput, error)
+ CreateCarrierGatewayRequest(*ec2.CreateCarrierGatewayInput) (*request.Request, *ec2.CreateCarrierGatewayOutput)
+
+ CreateClientVpnEndpoint(*ec2.CreateClientVpnEndpointInput) (*ec2.CreateClientVpnEndpointOutput, error)
+ CreateClientVpnEndpointWithContext(aws.Context, *ec2.CreateClientVpnEndpointInput, ...request.Option) (*ec2.CreateClientVpnEndpointOutput, error)
+ CreateClientVpnEndpointRequest(*ec2.CreateClientVpnEndpointInput) (*request.Request, *ec2.CreateClientVpnEndpointOutput)
+
+ CreateClientVpnRoute(*ec2.CreateClientVpnRouteInput) (*ec2.CreateClientVpnRouteOutput, error)
+ CreateClientVpnRouteWithContext(aws.Context, *ec2.CreateClientVpnRouteInput, ...request.Option) (*ec2.CreateClientVpnRouteOutput, error)
+ CreateClientVpnRouteRequest(*ec2.CreateClientVpnRouteInput) (*request.Request, *ec2.CreateClientVpnRouteOutput)
+
+ CreateCoipCidr(*ec2.CreateCoipCidrInput) (*ec2.CreateCoipCidrOutput, error)
+ CreateCoipCidrWithContext(aws.Context, *ec2.CreateCoipCidrInput, ...request.Option) (*ec2.CreateCoipCidrOutput, error)
+ CreateCoipCidrRequest(*ec2.CreateCoipCidrInput) (*request.Request, *ec2.CreateCoipCidrOutput)
+
+ CreateCoipPool(*ec2.CreateCoipPoolInput) (*ec2.CreateCoipPoolOutput, error)
+ CreateCoipPoolWithContext(aws.Context, *ec2.CreateCoipPoolInput, ...request.Option) (*ec2.CreateCoipPoolOutput, error)
+ CreateCoipPoolRequest(*ec2.CreateCoipPoolInput) (*request.Request, *ec2.CreateCoipPoolOutput)
+
+ CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error)
+ CreateCustomerGatewayWithContext(aws.Context, *ec2.CreateCustomerGatewayInput, ...request.Option) (*ec2.CreateCustomerGatewayOutput, error)
+ CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput)
+
+ CreateDefaultSubnet(*ec2.CreateDefaultSubnetInput) (*ec2.CreateDefaultSubnetOutput, error)
+ CreateDefaultSubnetWithContext(aws.Context, *ec2.CreateDefaultSubnetInput, ...request.Option) (*ec2.CreateDefaultSubnetOutput, error)
+ CreateDefaultSubnetRequest(*ec2.CreateDefaultSubnetInput) (*request.Request, *ec2.CreateDefaultSubnetOutput)
+
+ CreateDefaultVpc(*ec2.CreateDefaultVpcInput) (*ec2.CreateDefaultVpcOutput, error)
+ CreateDefaultVpcWithContext(aws.Context, *ec2.CreateDefaultVpcInput, ...request.Option) (*ec2.CreateDefaultVpcOutput, error)
+ CreateDefaultVpcRequest(*ec2.CreateDefaultVpcInput) (*request.Request, *ec2.CreateDefaultVpcOutput)
+
+ CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error)
+ CreateDhcpOptionsWithContext(aws.Context, *ec2.CreateDhcpOptionsInput, ...request.Option) (*ec2.CreateDhcpOptionsOutput, error)
+ CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput)
+
+ CreateEgressOnlyInternetGateway(*ec2.CreateEgressOnlyInternetGatewayInput) (*ec2.CreateEgressOnlyInternetGatewayOutput, error)
+ CreateEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.CreateEgressOnlyInternetGatewayInput, ...request.Option) (*ec2.CreateEgressOnlyInternetGatewayOutput, error)
+ CreateEgressOnlyInternetGatewayRequest(*ec2.CreateEgressOnlyInternetGatewayInput) (*request.Request, *ec2.CreateEgressOnlyInternetGatewayOutput)
+
+ CreateFleet(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error)
+ CreateFleetWithContext(aws.Context, *ec2.CreateFleetInput, ...request.Option) (*ec2.CreateFleetOutput, error)
+ CreateFleetRequest(*ec2.CreateFleetInput) (*request.Request, *ec2.CreateFleetOutput)
+
+ CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error)
+ CreateFlowLogsWithContext(aws.Context, *ec2.CreateFlowLogsInput, ...request.Option) (*ec2.CreateFlowLogsOutput, error)
+ CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput)
+
+ CreateFpgaImage(*ec2.CreateFpgaImageInput) (*ec2.CreateFpgaImageOutput, error)
+ CreateFpgaImageWithContext(aws.Context, *ec2.CreateFpgaImageInput, ...request.Option) (*ec2.CreateFpgaImageOutput, error)
+ CreateFpgaImageRequest(*ec2.CreateFpgaImageInput) (*request.Request, *ec2.CreateFpgaImageOutput)
+
+ CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error)
+ CreateImageWithContext(aws.Context, *ec2.CreateImageInput, ...request.Option) (*ec2.CreateImageOutput, error)
+ CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput)
+
+ CreateInstanceEventWindow(*ec2.CreateInstanceEventWindowInput) (*ec2.CreateInstanceEventWindowOutput, error)
+ CreateInstanceEventWindowWithContext(aws.Context, *ec2.CreateInstanceEventWindowInput, ...request.Option) (*ec2.CreateInstanceEventWindowOutput, error)
+ CreateInstanceEventWindowRequest(*ec2.CreateInstanceEventWindowInput) (*request.Request, *ec2.CreateInstanceEventWindowOutput)
+
+ CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error)
+ CreateInstanceExportTaskWithContext(aws.Context, *ec2.CreateInstanceExportTaskInput, ...request.Option) (*ec2.CreateInstanceExportTaskOutput, error)
+ CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput)
+
+ CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error)
+ CreateInternetGatewayWithContext(aws.Context, *ec2.CreateInternetGatewayInput, ...request.Option) (*ec2.CreateInternetGatewayOutput, error)
+ CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput)
+
+ CreateIpam(*ec2.CreateIpamInput) (*ec2.CreateIpamOutput, error)
+ CreateIpamWithContext(aws.Context, *ec2.CreateIpamInput, ...request.Option) (*ec2.CreateIpamOutput, error)
+ CreateIpamRequest(*ec2.CreateIpamInput) (*request.Request, *ec2.CreateIpamOutput)
+
+ CreateIpamPool(*ec2.CreateIpamPoolInput) (*ec2.CreateIpamPoolOutput, error)
+ CreateIpamPoolWithContext(aws.Context, *ec2.CreateIpamPoolInput, ...request.Option) (*ec2.CreateIpamPoolOutput, error)
+ CreateIpamPoolRequest(*ec2.CreateIpamPoolInput) (*request.Request, *ec2.CreateIpamPoolOutput)
+
+ CreateIpamResourceDiscovery(*ec2.CreateIpamResourceDiscoveryInput) (*ec2.CreateIpamResourceDiscoveryOutput, error)
+ CreateIpamResourceDiscoveryWithContext(aws.Context, *ec2.CreateIpamResourceDiscoveryInput, ...request.Option) (*ec2.CreateIpamResourceDiscoveryOutput, error)
+ CreateIpamResourceDiscoveryRequest(*ec2.CreateIpamResourceDiscoveryInput) (*request.Request, *ec2.CreateIpamResourceDiscoveryOutput)
+
+ CreateIpamScope(*ec2.CreateIpamScopeInput) (*ec2.CreateIpamScopeOutput, error)
+ CreateIpamScopeWithContext(aws.Context, *ec2.CreateIpamScopeInput, ...request.Option) (*ec2.CreateIpamScopeOutput, error)
+ CreateIpamScopeRequest(*ec2.CreateIpamScopeInput) (*request.Request, *ec2.CreateIpamScopeOutput)
+
+ CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error)
+ CreateKeyPairWithContext(aws.Context, *ec2.CreateKeyPairInput, ...request.Option) (*ec2.CreateKeyPairOutput, error)
+ CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput)
+
+ CreateLaunchTemplate(*ec2.CreateLaunchTemplateInput) (*ec2.CreateLaunchTemplateOutput, error)
+ CreateLaunchTemplateWithContext(aws.Context, *ec2.CreateLaunchTemplateInput, ...request.Option) (*ec2.CreateLaunchTemplateOutput, error)
+ CreateLaunchTemplateRequest(*ec2.CreateLaunchTemplateInput) (*request.Request, *ec2.CreateLaunchTemplateOutput)
+
+ CreateLaunchTemplateVersion(*ec2.CreateLaunchTemplateVersionInput) (*ec2.CreateLaunchTemplateVersionOutput, error)
+ CreateLaunchTemplateVersionWithContext(aws.Context, *ec2.CreateLaunchTemplateVersionInput, ...request.Option) (*ec2.CreateLaunchTemplateVersionOutput, error)
+ CreateLaunchTemplateVersionRequest(*ec2.CreateLaunchTemplateVersionInput) (*request.Request, *ec2.CreateLaunchTemplateVersionOutput)
+
+ CreateLocalGatewayRoute(*ec2.CreateLocalGatewayRouteInput) (*ec2.CreateLocalGatewayRouteOutput, error)
+ CreateLocalGatewayRouteWithContext(aws.Context, *ec2.CreateLocalGatewayRouteInput, ...request.Option) (*ec2.CreateLocalGatewayRouteOutput, error)
+ CreateLocalGatewayRouteRequest(*ec2.CreateLocalGatewayRouteInput) (*request.Request, *ec2.CreateLocalGatewayRouteOutput)
+
+ CreateLocalGatewayRouteTable(*ec2.CreateLocalGatewayRouteTableInput) (*ec2.CreateLocalGatewayRouteTableOutput, error)
+ CreateLocalGatewayRouteTableWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableOutput, error)
+ CreateLocalGatewayRouteTableRequest(*ec2.CreateLocalGatewayRouteTableInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableOutput)
+
+ CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error)
+ CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error)
+ CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(*ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput)
+
+ CreateLocalGatewayRouteTableVpcAssociation(*ec2.CreateLocalGatewayRouteTableVpcAssociationInput) (*ec2.CreateLocalGatewayRouteTableVpcAssociationOutput, error)
+ CreateLocalGatewayRouteTableVpcAssociationWithContext(aws.Context, *ec2.CreateLocalGatewayRouteTableVpcAssociationInput, ...request.Option) (*ec2.CreateLocalGatewayRouteTableVpcAssociationOutput, error)
+ CreateLocalGatewayRouteTableVpcAssociationRequest(*ec2.CreateLocalGatewayRouteTableVpcAssociationInput) (*request.Request, *ec2.CreateLocalGatewayRouteTableVpcAssociationOutput)
+
+ CreateManagedPrefixList(*ec2.CreateManagedPrefixListInput) (*ec2.CreateManagedPrefixListOutput, error)
+ CreateManagedPrefixListWithContext(aws.Context, *ec2.CreateManagedPrefixListInput, ...request.Option) (*ec2.CreateManagedPrefixListOutput, error)
+ CreateManagedPrefixListRequest(*ec2.CreateManagedPrefixListInput) (*request.Request, *ec2.CreateManagedPrefixListOutput)
+
+ CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error)
+ CreateNatGatewayWithContext(aws.Context, *ec2.CreateNatGatewayInput, ...request.Option) (*ec2.CreateNatGatewayOutput, error)
+ CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput)
+
+ CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error)
+ CreateNetworkAclWithContext(aws.Context, *ec2.CreateNetworkAclInput, ...request.Option) (*ec2.CreateNetworkAclOutput, error)
+ CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput)
+
+ CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error)
+ CreateNetworkAclEntryWithContext(aws.Context, *ec2.CreateNetworkAclEntryInput, ...request.Option) (*ec2.CreateNetworkAclEntryOutput, error)
+ CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput)
+
+ CreateNetworkInsightsAccessScope(*ec2.CreateNetworkInsightsAccessScopeInput) (*ec2.CreateNetworkInsightsAccessScopeOutput, error)
+ CreateNetworkInsightsAccessScopeWithContext(aws.Context, *ec2.CreateNetworkInsightsAccessScopeInput, ...request.Option) (*ec2.CreateNetworkInsightsAccessScopeOutput, error)
+ CreateNetworkInsightsAccessScopeRequest(*ec2.CreateNetworkInsightsAccessScopeInput) (*request.Request, *ec2.CreateNetworkInsightsAccessScopeOutput)
+
+ CreateNetworkInsightsPath(*ec2.CreateNetworkInsightsPathInput) (*ec2.CreateNetworkInsightsPathOutput, error)
+ CreateNetworkInsightsPathWithContext(aws.Context, *ec2.CreateNetworkInsightsPathInput, ...request.Option) (*ec2.CreateNetworkInsightsPathOutput, error)
+ CreateNetworkInsightsPathRequest(*ec2.CreateNetworkInsightsPathInput) (*request.Request, *ec2.CreateNetworkInsightsPathOutput)
+
+ CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error)
+ CreateNetworkInterfaceWithContext(aws.Context, *ec2.CreateNetworkInterfaceInput, ...request.Option) (*ec2.CreateNetworkInterfaceOutput, error)
+ CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput)
+
+ CreateNetworkInterfacePermission(*ec2.CreateNetworkInterfacePermissionInput) (*ec2.CreateNetworkInterfacePermissionOutput, error)
+ CreateNetworkInterfacePermissionWithContext(aws.Context, *ec2.CreateNetworkInterfacePermissionInput, ...request.Option) (*ec2.CreateNetworkInterfacePermissionOutput, error)
+ CreateNetworkInterfacePermissionRequest(*ec2.CreateNetworkInterfacePermissionInput) (*request.Request, *ec2.CreateNetworkInterfacePermissionOutput)
+
+ CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error)
+ CreatePlacementGroupWithContext(aws.Context, *ec2.CreatePlacementGroupInput, ...request.Option) (*ec2.CreatePlacementGroupOutput, error)
+ CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput)
+
+ CreatePublicIpv4Pool(*ec2.CreatePublicIpv4PoolInput) (*ec2.CreatePublicIpv4PoolOutput, error)
+ CreatePublicIpv4PoolWithContext(aws.Context, *ec2.CreatePublicIpv4PoolInput, ...request.Option) (*ec2.CreatePublicIpv4PoolOutput, error)
+ CreatePublicIpv4PoolRequest(*ec2.CreatePublicIpv4PoolInput) (*request.Request, *ec2.CreatePublicIpv4PoolOutput)
+
+ CreateReplaceRootVolumeTask(*ec2.CreateReplaceRootVolumeTaskInput) (*ec2.CreateReplaceRootVolumeTaskOutput, error)
+ CreateReplaceRootVolumeTaskWithContext(aws.Context, *ec2.CreateReplaceRootVolumeTaskInput, ...request.Option) (*ec2.CreateReplaceRootVolumeTaskOutput, error)
+ CreateReplaceRootVolumeTaskRequest(*ec2.CreateReplaceRootVolumeTaskInput) (*request.Request, *ec2.CreateReplaceRootVolumeTaskOutput)
+
+ CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error)
+ CreateReservedInstancesListingWithContext(aws.Context, *ec2.CreateReservedInstancesListingInput, ...request.Option) (*ec2.CreateReservedInstancesListingOutput, error)
+ CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput)
+
+ CreateRestoreImageTask(*ec2.CreateRestoreImageTaskInput) (*ec2.CreateRestoreImageTaskOutput, error)
+ CreateRestoreImageTaskWithContext(aws.Context, *ec2.CreateRestoreImageTaskInput, ...request.Option) (*ec2.CreateRestoreImageTaskOutput, error)
+ CreateRestoreImageTaskRequest(*ec2.CreateRestoreImageTaskInput) (*request.Request, *ec2.CreateRestoreImageTaskOutput)
+
+ CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
+ CreateRouteWithContext(aws.Context, *ec2.CreateRouteInput, ...request.Option) (*ec2.CreateRouteOutput, error)
+ CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput)
+
+ CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error)
+ CreateRouteTableWithContext(aws.Context, *ec2.CreateRouteTableInput, ...request.Option) (*ec2.CreateRouteTableOutput, error)
+ CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput)
+
+ CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
+ CreateSecurityGroupWithContext(aws.Context, *ec2.CreateSecurityGroupInput, ...request.Option) (*ec2.CreateSecurityGroupOutput, error)
+ CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput)
+
+ CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error)
+ CreateSnapshotWithContext(aws.Context, *ec2.CreateSnapshotInput, ...request.Option) (*ec2.Snapshot, error)
+ CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot)
+
+ CreateSnapshots(*ec2.CreateSnapshotsInput) (*ec2.CreateSnapshotsOutput, error)
+ CreateSnapshotsWithContext(aws.Context, *ec2.CreateSnapshotsInput, ...request.Option) (*ec2.CreateSnapshotsOutput, error)
+ CreateSnapshotsRequest(*ec2.CreateSnapshotsInput) (*request.Request, *ec2.CreateSnapshotsOutput)
+
+ CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error)
+ CreateSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.CreateSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.CreateSpotDatafeedSubscriptionOutput, error)
+ CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput)
+
+ CreateStoreImageTask(*ec2.CreateStoreImageTaskInput) (*ec2.CreateStoreImageTaskOutput, error)
+ CreateStoreImageTaskWithContext(aws.Context, *ec2.CreateStoreImageTaskInput, ...request.Option) (*ec2.CreateStoreImageTaskOutput, error)
+ CreateStoreImageTaskRequest(*ec2.CreateStoreImageTaskInput) (*request.Request, *ec2.CreateStoreImageTaskOutput)
+
+ CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error)
+ CreateSubnetWithContext(aws.Context, *ec2.CreateSubnetInput, ...request.Option) (*ec2.CreateSubnetOutput, error)
+ CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput)
+
+ CreateSubnetCidrReservation(*ec2.CreateSubnetCidrReservationInput) (*ec2.CreateSubnetCidrReservationOutput, error)
+ CreateSubnetCidrReservationWithContext(aws.Context, *ec2.CreateSubnetCidrReservationInput, ...request.Option) (*ec2.CreateSubnetCidrReservationOutput, error)
+ CreateSubnetCidrReservationRequest(*ec2.CreateSubnetCidrReservationInput) (*request.Request, *ec2.CreateSubnetCidrReservationOutput)
+
+ CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
+ CreateTagsWithContext(aws.Context, *ec2.CreateTagsInput, ...request.Option) (*ec2.CreateTagsOutput, error)
+ CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput)
+
+ CreateTrafficMirrorFilter(*ec2.CreateTrafficMirrorFilterInput) (*ec2.CreateTrafficMirrorFilterOutput, error)
+ CreateTrafficMirrorFilterWithContext(aws.Context, *ec2.CreateTrafficMirrorFilterInput, ...request.Option) (*ec2.CreateTrafficMirrorFilterOutput, error)
+ CreateTrafficMirrorFilterRequest(*ec2.CreateTrafficMirrorFilterInput) (*request.Request, *ec2.CreateTrafficMirrorFilterOutput)
+
+ CreateTrafficMirrorFilterRule(*ec2.CreateTrafficMirrorFilterRuleInput) (*ec2.CreateTrafficMirrorFilterRuleOutput, error)
+ CreateTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.CreateTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.CreateTrafficMirrorFilterRuleOutput, error)
+ CreateTrafficMirrorFilterRuleRequest(*ec2.CreateTrafficMirrorFilterRuleInput) (*request.Request, *ec2.CreateTrafficMirrorFilterRuleOutput)
+
+ CreateTrafficMirrorSession(*ec2.CreateTrafficMirrorSessionInput) (*ec2.CreateTrafficMirrorSessionOutput, error)
+ CreateTrafficMirrorSessionWithContext(aws.Context, *ec2.CreateTrafficMirrorSessionInput, ...request.Option) (*ec2.CreateTrafficMirrorSessionOutput, error)
+ CreateTrafficMirrorSessionRequest(*ec2.CreateTrafficMirrorSessionInput) (*request.Request, *ec2.CreateTrafficMirrorSessionOutput)
+
+ CreateTrafficMirrorTarget(*ec2.CreateTrafficMirrorTargetInput) (*ec2.CreateTrafficMirrorTargetOutput, error)
+ CreateTrafficMirrorTargetWithContext(aws.Context, *ec2.CreateTrafficMirrorTargetInput, ...request.Option) (*ec2.CreateTrafficMirrorTargetOutput, error)
+ CreateTrafficMirrorTargetRequest(*ec2.CreateTrafficMirrorTargetInput) (*request.Request, *ec2.CreateTrafficMirrorTargetOutput)
+
+ CreateTransitGateway(*ec2.CreateTransitGatewayInput) (*ec2.CreateTransitGatewayOutput, error)
+ CreateTransitGatewayWithContext(aws.Context, *ec2.CreateTransitGatewayInput, ...request.Option) (*ec2.CreateTransitGatewayOutput, error)
+ CreateTransitGatewayRequest(*ec2.CreateTransitGatewayInput) (*request.Request, *ec2.CreateTransitGatewayOutput)
+
+ CreateTransitGatewayConnect(*ec2.CreateTransitGatewayConnectInput) (*ec2.CreateTransitGatewayConnectOutput, error)
+ CreateTransitGatewayConnectWithContext(aws.Context, *ec2.CreateTransitGatewayConnectInput, ...request.Option) (*ec2.CreateTransitGatewayConnectOutput, error)
+ CreateTransitGatewayConnectRequest(*ec2.CreateTransitGatewayConnectInput) (*request.Request, *ec2.CreateTransitGatewayConnectOutput)
+
+ CreateTransitGatewayConnectPeer(*ec2.CreateTransitGatewayConnectPeerInput) (*ec2.CreateTransitGatewayConnectPeerOutput, error)
+ CreateTransitGatewayConnectPeerWithContext(aws.Context, *ec2.CreateTransitGatewayConnectPeerInput, ...request.Option) (*ec2.CreateTransitGatewayConnectPeerOutput, error)
+ CreateTransitGatewayConnectPeerRequest(*ec2.CreateTransitGatewayConnectPeerInput) (*request.Request, *ec2.CreateTransitGatewayConnectPeerOutput)
+
+ CreateTransitGatewayMulticastDomain(*ec2.CreateTransitGatewayMulticastDomainInput) (*ec2.CreateTransitGatewayMulticastDomainOutput, error)
+ CreateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.CreateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.CreateTransitGatewayMulticastDomainOutput, error)
+ CreateTransitGatewayMulticastDomainRequest(*ec2.CreateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.CreateTransitGatewayMulticastDomainOutput)
+
+ CreateTransitGatewayPeeringAttachment(*ec2.CreateTransitGatewayPeeringAttachmentInput) (*ec2.CreateTransitGatewayPeeringAttachmentOutput, error)
+ CreateTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.CreateTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.CreateTransitGatewayPeeringAttachmentOutput, error)
+ CreateTransitGatewayPeeringAttachmentRequest(*ec2.CreateTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.CreateTransitGatewayPeeringAttachmentOutput)
+
+ CreateTransitGatewayPolicyTable(*ec2.CreateTransitGatewayPolicyTableInput) (*ec2.CreateTransitGatewayPolicyTableOutput, error)
+ CreateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.CreateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.CreateTransitGatewayPolicyTableOutput, error)
+ CreateTransitGatewayPolicyTableRequest(*ec2.CreateTransitGatewayPolicyTableInput) (*request.Request, *ec2.CreateTransitGatewayPolicyTableOutput)
+
+ CreateTransitGatewayPrefixListReference(*ec2.CreateTransitGatewayPrefixListReferenceInput) (*ec2.CreateTransitGatewayPrefixListReferenceOutput, error)
+ CreateTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.CreateTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.CreateTransitGatewayPrefixListReferenceOutput, error)
+ CreateTransitGatewayPrefixListReferenceRequest(*ec2.CreateTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.CreateTransitGatewayPrefixListReferenceOutput)
+
+ CreateTransitGatewayRoute(*ec2.CreateTransitGatewayRouteInput) (*ec2.CreateTransitGatewayRouteOutput, error)
+ CreateTransitGatewayRouteWithContext(aws.Context, *ec2.CreateTransitGatewayRouteInput, ...request.Option) (*ec2.CreateTransitGatewayRouteOutput, error)
+ CreateTransitGatewayRouteRequest(*ec2.CreateTransitGatewayRouteInput) (*request.Request, *ec2.CreateTransitGatewayRouteOutput)
+
+ CreateTransitGatewayRouteTable(*ec2.CreateTransitGatewayRouteTableInput) (*ec2.CreateTransitGatewayRouteTableOutput, error)
+ CreateTransitGatewayRouteTableWithContext(aws.Context, *ec2.CreateTransitGatewayRouteTableInput, ...request.Option) (*ec2.CreateTransitGatewayRouteTableOutput, error)
+ CreateTransitGatewayRouteTableRequest(*ec2.CreateTransitGatewayRouteTableInput) (*request.Request, *ec2.CreateTransitGatewayRouteTableOutput)
+
+ CreateTransitGatewayRouteTableAnnouncement(*ec2.CreateTransitGatewayRouteTableAnnouncementInput) (*ec2.CreateTransitGatewayRouteTableAnnouncementOutput, error)
+ CreateTransitGatewayRouteTableAnnouncementWithContext(aws.Context, *ec2.CreateTransitGatewayRouteTableAnnouncementInput, ...request.Option) (*ec2.CreateTransitGatewayRouteTableAnnouncementOutput, error)
+ CreateTransitGatewayRouteTableAnnouncementRequest(*ec2.CreateTransitGatewayRouteTableAnnouncementInput) (*request.Request, *ec2.CreateTransitGatewayRouteTableAnnouncementOutput)
+
+ CreateTransitGatewayVpcAttachment(*ec2.CreateTransitGatewayVpcAttachmentInput) (*ec2.CreateTransitGatewayVpcAttachmentOutput, error)
+ CreateTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.CreateTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.CreateTransitGatewayVpcAttachmentOutput, error)
+ CreateTransitGatewayVpcAttachmentRequest(*ec2.CreateTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.CreateTransitGatewayVpcAttachmentOutput)
+
+ CreateVerifiedAccessEndpoint(*ec2.CreateVerifiedAccessEndpointInput) (*ec2.CreateVerifiedAccessEndpointOutput, error)
+ CreateVerifiedAccessEndpointWithContext(aws.Context, *ec2.CreateVerifiedAccessEndpointInput, ...request.Option) (*ec2.CreateVerifiedAccessEndpointOutput, error)
+ CreateVerifiedAccessEndpointRequest(*ec2.CreateVerifiedAccessEndpointInput) (*request.Request, *ec2.CreateVerifiedAccessEndpointOutput)
+
+ CreateVerifiedAccessGroup(*ec2.CreateVerifiedAccessGroupInput) (*ec2.CreateVerifiedAccessGroupOutput, error)
+ CreateVerifiedAccessGroupWithContext(aws.Context, *ec2.CreateVerifiedAccessGroupInput, ...request.Option) (*ec2.CreateVerifiedAccessGroupOutput, error)
+ CreateVerifiedAccessGroupRequest(*ec2.CreateVerifiedAccessGroupInput) (*request.Request, *ec2.CreateVerifiedAccessGroupOutput)
+
+ CreateVerifiedAccessInstance(*ec2.CreateVerifiedAccessInstanceInput) (*ec2.CreateVerifiedAccessInstanceOutput, error)
+ CreateVerifiedAccessInstanceWithContext(aws.Context, *ec2.CreateVerifiedAccessInstanceInput, ...request.Option) (*ec2.CreateVerifiedAccessInstanceOutput, error)
+ CreateVerifiedAccessInstanceRequest(*ec2.CreateVerifiedAccessInstanceInput) (*request.Request, *ec2.CreateVerifiedAccessInstanceOutput)
+
+ CreateVerifiedAccessTrustProvider(*ec2.CreateVerifiedAccessTrustProviderInput) (*ec2.CreateVerifiedAccessTrustProviderOutput, error)
+ CreateVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.CreateVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.CreateVerifiedAccessTrustProviderOutput, error)
+ CreateVerifiedAccessTrustProviderRequest(*ec2.CreateVerifiedAccessTrustProviderInput) (*request.Request, *ec2.CreateVerifiedAccessTrustProviderOutput)
+
+ CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error)
+ CreateVolumeWithContext(aws.Context, *ec2.CreateVolumeInput, ...request.Option) (*ec2.Volume, error)
+ CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume)
+
+ CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error)
+ CreateVpcWithContext(aws.Context, *ec2.CreateVpcInput, ...request.Option) (*ec2.CreateVpcOutput, error)
+ CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput)
+
+ CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error)
+ CreateVpcEndpointWithContext(aws.Context, *ec2.CreateVpcEndpointInput, ...request.Option) (*ec2.CreateVpcEndpointOutput, error)
+ CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput)
+
+ CreateVpcEndpointConnectionNotification(*ec2.CreateVpcEndpointConnectionNotificationInput) (*ec2.CreateVpcEndpointConnectionNotificationOutput, error)
+ CreateVpcEndpointConnectionNotificationWithContext(aws.Context, *ec2.CreateVpcEndpointConnectionNotificationInput, ...request.Option) (*ec2.CreateVpcEndpointConnectionNotificationOutput, error)
+ CreateVpcEndpointConnectionNotificationRequest(*ec2.CreateVpcEndpointConnectionNotificationInput) (*request.Request, *ec2.CreateVpcEndpointConnectionNotificationOutput)
+
+ CreateVpcEndpointServiceConfiguration(*ec2.CreateVpcEndpointServiceConfigurationInput) (*ec2.CreateVpcEndpointServiceConfigurationOutput, error)
+ CreateVpcEndpointServiceConfigurationWithContext(aws.Context, *ec2.CreateVpcEndpointServiceConfigurationInput, ...request.Option) (*ec2.CreateVpcEndpointServiceConfigurationOutput, error)
+ CreateVpcEndpointServiceConfigurationRequest(*ec2.CreateVpcEndpointServiceConfigurationInput) (*request.Request, *ec2.CreateVpcEndpointServiceConfigurationOutput)
+
+ CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error)
+ CreateVpcPeeringConnectionWithContext(aws.Context, *ec2.CreateVpcPeeringConnectionInput, ...request.Option) (*ec2.CreateVpcPeeringConnectionOutput, error)
+ CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput)
+
+ CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error)
+ CreateVpnConnectionWithContext(aws.Context, *ec2.CreateVpnConnectionInput, ...request.Option) (*ec2.CreateVpnConnectionOutput, error)
+ CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput)
+
+ CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error)
+ CreateVpnConnectionRouteWithContext(aws.Context, *ec2.CreateVpnConnectionRouteInput, ...request.Option) (*ec2.CreateVpnConnectionRouteOutput, error)
+ CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput)
+
+ CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error)
+ CreateVpnGatewayWithContext(aws.Context, *ec2.CreateVpnGatewayInput, ...request.Option) (*ec2.CreateVpnGatewayOutput, error)
+ CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput)
+
+ DeleteCarrierGateway(*ec2.DeleteCarrierGatewayInput) (*ec2.DeleteCarrierGatewayOutput, error)
+ DeleteCarrierGatewayWithContext(aws.Context, *ec2.DeleteCarrierGatewayInput, ...request.Option) (*ec2.DeleteCarrierGatewayOutput, error)
+ DeleteCarrierGatewayRequest(*ec2.DeleteCarrierGatewayInput) (*request.Request, *ec2.DeleteCarrierGatewayOutput)
+
+ DeleteClientVpnEndpoint(*ec2.DeleteClientVpnEndpointInput) (*ec2.DeleteClientVpnEndpointOutput, error)
+ DeleteClientVpnEndpointWithContext(aws.Context, *ec2.DeleteClientVpnEndpointInput, ...request.Option) (*ec2.DeleteClientVpnEndpointOutput, error)
+ DeleteClientVpnEndpointRequest(*ec2.DeleteClientVpnEndpointInput) (*request.Request, *ec2.DeleteClientVpnEndpointOutput)
+
+ DeleteClientVpnRoute(*ec2.DeleteClientVpnRouteInput) (*ec2.DeleteClientVpnRouteOutput, error)
+ DeleteClientVpnRouteWithContext(aws.Context, *ec2.DeleteClientVpnRouteInput, ...request.Option) (*ec2.DeleteClientVpnRouteOutput, error)
+ DeleteClientVpnRouteRequest(*ec2.DeleteClientVpnRouteInput) (*request.Request, *ec2.DeleteClientVpnRouteOutput)
+
+ DeleteCoipCidr(*ec2.DeleteCoipCidrInput) (*ec2.DeleteCoipCidrOutput, error)
+ DeleteCoipCidrWithContext(aws.Context, *ec2.DeleteCoipCidrInput, ...request.Option) (*ec2.DeleteCoipCidrOutput, error)
+ DeleteCoipCidrRequest(*ec2.DeleteCoipCidrInput) (*request.Request, *ec2.DeleteCoipCidrOutput)
+
+ DeleteCoipPool(*ec2.DeleteCoipPoolInput) (*ec2.DeleteCoipPoolOutput, error)
+ DeleteCoipPoolWithContext(aws.Context, *ec2.DeleteCoipPoolInput, ...request.Option) (*ec2.DeleteCoipPoolOutput, error)
+ DeleteCoipPoolRequest(*ec2.DeleteCoipPoolInput) (*request.Request, *ec2.DeleteCoipPoolOutput)
+
+ DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error)
+ DeleteCustomerGatewayWithContext(aws.Context, *ec2.DeleteCustomerGatewayInput, ...request.Option) (*ec2.DeleteCustomerGatewayOutput, error)
+ DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput)
+
+ DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error)
+ DeleteDhcpOptionsWithContext(aws.Context, *ec2.DeleteDhcpOptionsInput, ...request.Option) (*ec2.DeleteDhcpOptionsOutput, error)
+ DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput)
+
+ DeleteEgressOnlyInternetGateway(*ec2.DeleteEgressOnlyInternetGatewayInput) (*ec2.DeleteEgressOnlyInternetGatewayOutput, error)
+ DeleteEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.DeleteEgressOnlyInternetGatewayInput, ...request.Option) (*ec2.DeleteEgressOnlyInternetGatewayOutput, error)
+ DeleteEgressOnlyInternetGatewayRequest(*ec2.DeleteEgressOnlyInternetGatewayInput) (*request.Request, *ec2.DeleteEgressOnlyInternetGatewayOutput)
+
+ DeleteFleets(*ec2.DeleteFleetsInput) (*ec2.DeleteFleetsOutput, error)
+ DeleteFleetsWithContext(aws.Context, *ec2.DeleteFleetsInput, ...request.Option) (*ec2.DeleteFleetsOutput, error)
+ DeleteFleetsRequest(*ec2.DeleteFleetsInput) (*request.Request, *ec2.DeleteFleetsOutput)
+
+ DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error)
+ DeleteFlowLogsWithContext(aws.Context, *ec2.DeleteFlowLogsInput, ...request.Option) (*ec2.DeleteFlowLogsOutput, error)
+ DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput)
+
+ DeleteFpgaImage(*ec2.DeleteFpgaImageInput) (*ec2.DeleteFpgaImageOutput, error)
+ DeleteFpgaImageWithContext(aws.Context, *ec2.DeleteFpgaImageInput, ...request.Option) (*ec2.DeleteFpgaImageOutput, error)
+ DeleteFpgaImageRequest(*ec2.DeleteFpgaImageInput) (*request.Request, *ec2.DeleteFpgaImageOutput)
+
+ DeleteInstanceEventWindow(*ec2.DeleteInstanceEventWindowInput) (*ec2.DeleteInstanceEventWindowOutput, error)
+ DeleteInstanceEventWindowWithContext(aws.Context, *ec2.DeleteInstanceEventWindowInput, ...request.Option) (*ec2.DeleteInstanceEventWindowOutput, error)
+ DeleteInstanceEventWindowRequest(*ec2.DeleteInstanceEventWindowInput) (*request.Request, *ec2.DeleteInstanceEventWindowOutput)
+
+ DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error)
+ DeleteInternetGatewayWithContext(aws.Context, *ec2.DeleteInternetGatewayInput, ...request.Option) (*ec2.DeleteInternetGatewayOutput, error)
+ DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput)
+
+ DeleteIpam(*ec2.DeleteIpamInput) (*ec2.DeleteIpamOutput, error)
+ DeleteIpamWithContext(aws.Context, *ec2.DeleteIpamInput, ...request.Option) (*ec2.DeleteIpamOutput, error)
+ DeleteIpamRequest(*ec2.DeleteIpamInput) (*request.Request, *ec2.DeleteIpamOutput)
+
+ DeleteIpamPool(*ec2.DeleteIpamPoolInput) (*ec2.DeleteIpamPoolOutput, error)
+ DeleteIpamPoolWithContext(aws.Context, *ec2.DeleteIpamPoolInput, ...request.Option) (*ec2.DeleteIpamPoolOutput, error)
+ DeleteIpamPoolRequest(*ec2.DeleteIpamPoolInput) (*request.Request, *ec2.DeleteIpamPoolOutput)
+
+ DeleteIpamResourceDiscovery(*ec2.DeleteIpamResourceDiscoveryInput) (*ec2.DeleteIpamResourceDiscoveryOutput, error)
+ DeleteIpamResourceDiscoveryWithContext(aws.Context, *ec2.DeleteIpamResourceDiscoveryInput, ...request.Option) (*ec2.DeleteIpamResourceDiscoveryOutput, error)
+ DeleteIpamResourceDiscoveryRequest(*ec2.DeleteIpamResourceDiscoveryInput) (*request.Request, *ec2.DeleteIpamResourceDiscoveryOutput)
+
+ DeleteIpamScope(*ec2.DeleteIpamScopeInput) (*ec2.DeleteIpamScopeOutput, error)
+ DeleteIpamScopeWithContext(aws.Context, *ec2.DeleteIpamScopeInput, ...request.Option) (*ec2.DeleteIpamScopeOutput, error)
+ DeleteIpamScopeRequest(*ec2.DeleteIpamScopeInput) (*request.Request, *ec2.DeleteIpamScopeOutput)
+
+ DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error)
+ DeleteKeyPairWithContext(aws.Context, *ec2.DeleteKeyPairInput, ...request.Option) (*ec2.DeleteKeyPairOutput, error)
+ DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput)
+
+ DeleteLaunchTemplate(*ec2.DeleteLaunchTemplateInput) (*ec2.DeleteLaunchTemplateOutput, error)
+ DeleteLaunchTemplateWithContext(aws.Context, *ec2.DeleteLaunchTemplateInput, ...request.Option) (*ec2.DeleteLaunchTemplateOutput, error)
+ DeleteLaunchTemplateRequest(*ec2.DeleteLaunchTemplateInput) (*request.Request, *ec2.DeleteLaunchTemplateOutput)
+
+ DeleteLaunchTemplateVersions(*ec2.DeleteLaunchTemplateVersionsInput) (*ec2.DeleteLaunchTemplateVersionsOutput, error)
+ DeleteLaunchTemplateVersionsWithContext(aws.Context, *ec2.DeleteLaunchTemplateVersionsInput, ...request.Option) (*ec2.DeleteLaunchTemplateVersionsOutput, error)
+ DeleteLaunchTemplateVersionsRequest(*ec2.DeleteLaunchTemplateVersionsInput) (*request.Request, *ec2.DeleteLaunchTemplateVersionsOutput)
+
+ DeleteLocalGatewayRoute(*ec2.DeleteLocalGatewayRouteInput) (*ec2.DeleteLocalGatewayRouteOutput, error)
+ DeleteLocalGatewayRouteWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteOutput, error)
+ DeleteLocalGatewayRouteRequest(*ec2.DeleteLocalGatewayRouteInput) (*request.Request, *ec2.DeleteLocalGatewayRouteOutput)
+
+ DeleteLocalGatewayRouteTable(*ec2.DeleteLocalGatewayRouteTableInput) (*ec2.DeleteLocalGatewayRouteTableOutput, error)
+ DeleteLocalGatewayRouteTableWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableOutput, error)
+ DeleteLocalGatewayRouteTableRequest(*ec2.DeleteLocalGatewayRouteTableInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableOutput)
+
+ DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error)
+ DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput, error)
+ DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationRequest(*ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput)
+
+ DeleteLocalGatewayRouteTableVpcAssociation(*ec2.DeleteLocalGatewayRouteTableVpcAssociationInput) (*ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput, error)
+ DeleteLocalGatewayRouteTableVpcAssociationWithContext(aws.Context, *ec2.DeleteLocalGatewayRouteTableVpcAssociationInput, ...request.Option) (*ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput, error)
+ DeleteLocalGatewayRouteTableVpcAssociationRequest(*ec2.DeleteLocalGatewayRouteTableVpcAssociationInput) (*request.Request, *ec2.DeleteLocalGatewayRouteTableVpcAssociationOutput)
+
+ DeleteManagedPrefixList(*ec2.DeleteManagedPrefixListInput) (*ec2.DeleteManagedPrefixListOutput, error)
+ DeleteManagedPrefixListWithContext(aws.Context, *ec2.DeleteManagedPrefixListInput, ...request.Option) (*ec2.DeleteManagedPrefixListOutput, error)
+ DeleteManagedPrefixListRequest(*ec2.DeleteManagedPrefixListInput) (*request.Request, *ec2.DeleteManagedPrefixListOutput)
+
+ DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error)
+ DeleteNatGatewayWithContext(aws.Context, *ec2.DeleteNatGatewayInput, ...request.Option) (*ec2.DeleteNatGatewayOutput, error)
+ DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput)
+
+ DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error)
+ DeleteNetworkAclWithContext(aws.Context, *ec2.DeleteNetworkAclInput, ...request.Option) (*ec2.DeleteNetworkAclOutput, error)
+ DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput)
+
+ DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error)
+ DeleteNetworkAclEntryWithContext(aws.Context, *ec2.DeleteNetworkAclEntryInput, ...request.Option) (*ec2.DeleteNetworkAclEntryOutput, error)
+ DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput)
+
+ DeleteNetworkInsightsAccessScope(*ec2.DeleteNetworkInsightsAccessScopeInput) (*ec2.DeleteNetworkInsightsAccessScopeOutput, error)
+ DeleteNetworkInsightsAccessScopeWithContext(aws.Context, *ec2.DeleteNetworkInsightsAccessScopeInput, ...request.Option) (*ec2.DeleteNetworkInsightsAccessScopeOutput, error)
+ DeleteNetworkInsightsAccessScopeRequest(*ec2.DeleteNetworkInsightsAccessScopeInput) (*request.Request, *ec2.DeleteNetworkInsightsAccessScopeOutput)
+
+ DeleteNetworkInsightsAccessScopeAnalysis(*ec2.DeleteNetworkInsightsAccessScopeAnalysisInput) (*ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput, error)
+ DeleteNetworkInsightsAccessScopeAnalysisWithContext(aws.Context, *ec2.DeleteNetworkInsightsAccessScopeAnalysisInput, ...request.Option) (*ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput, error)
+ DeleteNetworkInsightsAccessScopeAnalysisRequest(*ec2.DeleteNetworkInsightsAccessScopeAnalysisInput) (*request.Request, *ec2.DeleteNetworkInsightsAccessScopeAnalysisOutput)
+
+ DeleteNetworkInsightsAnalysis(*ec2.DeleteNetworkInsightsAnalysisInput) (*ec2.DeleteNetworkInsightsAnalysisOutput, error)
+ DeleteNetworkInsightsAnalysisWithContext(aws.Context, *ec2.DeleteNetworkInsightsAnalysisInput, ...request.Option) (*ec2.DeleteNetworkInsightsAnalysisOutput, error)
+ DeleteNetworkInsightsAnalysisRequest(*ec2.DeleteNetworkInsightsAnalysisInput) (*request.Request, *ec2.DeleteNetworkInsightsAnalysisOutput)
+
+ DeleteNetworkInsightsPath(*ec2.DeleteNetworkInsightsPathInput) (*ec2.DeleteNetworkInsightsPathOutput, error)
+ DeleteNetworkInsightsPathWithContext(aws.Context, *ec2.DeleteNetworkInsightsPathInput, ...request.Option) (*ec2.DeleteNetworkInsightsPathOutput, error)
+ DeleteNetworkInsightsPathRequest(*ec2.DeleteNetworkInsightsPathInput) (*request.Request, *ec2.DeleteNetworkInsightsPathOutput)
+
+ DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error)
+ DeleteNetworkInterfaceWithContext(aws.Context, *ec2.DeleteNetworkInterfaceInput, ...request.Option) (*ec2.DeleteNetworkInterfaceOutput, error)
+ DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput)
+
+ DeleteNetworkInterfacePermission(*ec2.DeleteNetworkInterfacePermissionInput) (*ec2.DeleteNetworkInterfacePermissionOutput, error)
+ DeleteNetworkInterfacePermissionWithContext(aws.Context, *ec2.DeleteNetworkInterfacePermissionInput, ...request.Option) (*ec2.DeleteNetworkInterfacePermissionOutput, error)
+ DeleteNetworkInterfacePermissionRequest(*ec2.DeleteNetworkInterfacePermissionInput) (*request.Request, *ec2.DeleteNetworkInterfacePermissionOutput)
+
+ DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error)
+ DeletePlacementGroupWithContext(aws.Context, *ec2.DeletePlacementGroupInput, ...request.Option) (*ec2.DeletePlacementGroupOutput, error)
+ DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput)
+
+ DeletePublicIpv4Pool(*ec2.DeletePublicIpv4PoolInput) (*ec2.DeletePublicIpv4PoolOutput, error)
+ DeletePublicIpv4PoolWithContext(aws.Context, *ec2.DeletePublicIpv4PoolInput, ...request.Option) (*ec2.DeletePublicIpv4PoolOutput, error)
+ DeletePublicIpv4PoolRequest(*ec2.DeletePublicIpv4PoolInput) (*request.Request, *ec2.DeletePublicIpv4PoolOutput)
+
+ DeleteQueuedReservedInstances(*ec2.DeleteQueuedReservedInstancesInput) (*ec2.DeleteQueuedReservedInstancesOutput, error)
+ DeleteQueuedReservedInstancesWithContext(aws.Context, *ec2.DeleteQueuedReservedInstancesInput, ...request.Option) (*ec2.DeleteQueuedReservedInstancesOutput, error)
+ DeleteQueuedReservedInstancesRequest(*ec2.DeleteQueuedReservedInstancesInput) (*request.Request, *ec2.DeleteQueuedReservedInstancesOutput)
+
+ DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
+ DeleteRouteWithContext(aws.Context, *ec2.DeleteRouteInput, ...request.Option) (*ec2.DeleteRouteOutput, error)
+ DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput)
+
+ DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error)
+ DeleteRouteTableWithContext(aws.Context, *ec2.DeleteRouteTableInput, ...request.Option) (*ec2.DeleteRouteTableOutput, error)
+ DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput)
+
+ DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
+ DeleteSecurityGroupWithContext(aws.Context, *ec2.DeleteSecurityGroupInput, ...request.Option) (*ec2.DeleteSecurityGroupOutput, error)
+ DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput)
+
+ DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error)
+ DeleteSnapshotWithContext(aws.Context, *ec2.DeleteSnapshotInput, ...request.Option) (*ec2.DeleteSnapshotOutput, error)
+ DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput)
+
+ DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error)
+ DeleteSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.DeleteSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error)
+ DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput)
+
+ DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error)
+ DeleteSubnetWithContext(aws.Context, *ec2.DeleteSubnetInput, ...request.Option) (*ec2.DeleteSubnetOutput, error)
+ DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput)
+
+ DeleteSubnetCidrReservation(*ec2.DeleteSubnetCidrReservationInput) (*ec2.DeleteSubnetCidrReservationOutput, error)
+ DeleteSubnetCidrReservationWithContext(aws.Context, *ec2.DeleteSubnetCidrReservationInput, ...request.Option) (*ec2.DeleteSubnetCidrReservationOutput, error)
+ DeleteSubnetCidrReservationRequest(*ec2.DeleteSubnetCidrReservationInput) (*request.Request, *ec2.DeleteSubnetCidrReservationOutput)
+
+ DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error)
+ DeleteTagsWithContext(aws.Context, *ec2.DeleteTagsInput, ...request.Option) (*ec2.DeleteTagsOutput, error)
+ DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput)
+
+ DeleteTrafficMirrorFilter(*ec2.DeleteTrafficMirrorFilterInput) (*ec2.DeleteTrafficMirrorFilterOutput, error)
+ DeleteTrafficMirrorFilterWithContext(aws.Context, *ec2.DeleteTrafficMirrorFilterInput, ...request.Option) (*ec2.DeleteTrafficMirrorFilterOutput, error)
+ DeleteTrafficMirrorFilterRequest(*ec2.DeleteTrafficMirrorFilterInput) (*request.Request, *ec2.DeleteTrafficMirrorFilterOutput)
+
+ DeleteTrafficMirrorFilterRule(*ec2.DeleteTrafficMirrorFilterRuleInput) (*ec2.DeleteTrafficMirrorFilterRuleOutput, error)
+ DeleteTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.DeleteTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.DeleteTrafficMirrorFilterRuleOutput, error)
+ DeleteTrafficMirrorFilterRuleRequest(*ec2.DeleteTrafficMirrorFilterRuleInput) (*request.Request, *ec2.DeleteTrafficMirrorFilterRuleOutput)
+
+ DeleteTrafficMirrorSession(*ec2.DeleteTrafficMirrorSessionInput) (*ec2.DeleteTrafficMirrorSessionOutput, error)
+ DeleteTrafficMirrorSessionWithContext(aws.Context, *ec2.DeleteTrafficMirrorSessionInput, ...request.Option) (*ec2.DeleteTrafficMirrorSessionOutput, error)
+ DeleteTrafficMirrorSessionRequest(*ec2.DeleteTrafficMirrorSessionInput) (*request.Request, *ec2.DeleteTrafficMirrorSessionOutput)
+
+ DeleteTrafficMirrorTarget(*ec2.DeleteTrafficMirrorTargetInput) (*ec2.DeleteTrafficMirrorTargetOutput, error)
+ DeleteTrafficMirrorTargetWithContext(aws.Context, *ec2.DeleteTrafficMirrorTargetInput, ...request.Option) (*ec2.DeleteTrafficMirrorTargetOutput, error)
+ DeleteTrafficMirrorTargetRequest(*ec2.DeleteTrafficMirrorTargetInput) (*request.Request, *ec2.DeleteTrafficMirrorTargetOutput)
+
+ DeleteTransitGateway(*ec2.DeleteTransitGatewayInput) (*ec2.DeleteTransitGatewayOutput, error)
+ DeleteTransitGatewayWithContext(aws.Context, *ec2.DeleteTransitGatewayInput, ...request.Option) (*ec2.DeleteTransitGatewayOutput, error)
+ DeleteTransitGatewayRequest(*ec2.DeleteTransitGatewayInput) (*request.Request, *ec2.DeleteTransitGatewayOutput)
+
+ DeleteTransitGatewayConnect(*ec2.DeleteTransitGatewayConnectInput) (*ec2.DeleteTransitGatewayConnectOutput, error)
+ DeleteTransitGatewayConnectWithContext(aws.Context, *ec2.DeleteTransitGatewayConnectInput, ...request.Option) (*ec2.DeleteTransitGatewayConnectOutput, error)
+ DeleteTransitGatewayConnectRequest(*ec2.DeleteTransitGatewayConnectInput) (*request.Request, *ec2.DeleteTransitGatewayConnectOutput)
+
+ DeleteTransitGatewayConnectPeer(*ec2.DeleteTransitGatewayConnectPeerInput) (*ec2.DeleteTransitGatewayConnectPeerOutput, error)
+ DeleteTransitGatewayConnectPeerWithContext(aws.Context, *ec2.DeleteTransitGatewayConnectPeerInput, ...request.Option) (*ec2.DeleteTransitGatewayConnectPeerOutput, error)
+ DeleteTransitGatewayConnectPeerRequest(*ec2.DeleteTransitGatewayConnectPeerInput) (*request.Request, *ec2.DeleteTransitGatewayConnectPeerOutput)
+
+ DeleteTransitGatewayMulticastDomain(*ec2.DeleteTransitGatewayMulticastDomainInput) (*ec2.DeleteTransitGatewayMulticastDomainOutput, error)
+ DeleteTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.DeleteTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.DeleteTransitGatewayMulticastDomainOutput, error)
+ DeleteTransitGatewayMulticastDomainRequest(*ec2.DeleteTransitGatewayMulticastDomainInput) (*request.Request, *ec2.DeleteTransitGatewayMulticastDomainOutput)
+
+ DeleteTransitGatewayPeeringAttachment(*ec2.DeleteTransitGatewayPeeringAttachmentInput) (*ec2.DeleteTransitGatewayPeeringAttachmentOutput, error)
+ DeleteTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.DeleteTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.DeleteTransitGatewayPeeringAttachmentOutput, error)
+ DeleteTransitGatewayPeeringAttachmentRequest(*ec2.DeleteTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.DeleteTransitGatewayPeeringAttachmentOutput)
+
+ DeleteTransitGatewayPolicyTable(*ec2.DeleteTransitGatewayPolicyTableInput) (*ec2.DeleteTransitGatewayPolicyTableOutput, error)
+ DeleteTransitGatewayPolicyTableWithContext(aws.Context, *ec2.DeleteTransitGatewayPolicyTableInput, ...request.Option) (*ec2.DeleteTransitGatewayPolicyTableOutput, error)
+ DeleteTransitGatewayPolicyTableRequest(*ec2.DeleteTransitGatewayPolicyTableInput) (*request.Request, *ec2.DeleteTransitGatewayPolicyTableOutput)
+
+ DeleteTransitGatewayPrefixListReference(*ec2.DeleteTransitGatewayPrefixListReferenceInput) (*ec2.DeleteTransitGatewayPrefixListReferenceOutput, error)
+ DeleteTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.DeleteTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.DeleteTransitGatewayPrefixListReferenceOutput, error)
+ DeleteTransitGatewayPrefixListReferenceRequest(*ec2.DeleteTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.DeleteTransitGatewayPrefixListReferenceOutput)
+
+ DeleteTransitGatewayRoute(*ec2.DeleteTransitGatewayRouteInput) (*ec2.DeleteTransitGatewayRouteOutput, error)
+ DeleteTransitGatewayRouteWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteOutput, error)
+ DeleteTransitGatewayRouteRequest(*ec2.DeleteTransitGatewayRouteInput) (*request.Request, *ec2.DeleteTransitGatewayRouteOutput)
+
+ DeleteTransitGatewayRouteTable(*ec2.DeleteTransitGatewayRouteTableInput) (*ec2.DeleteTransitGatewayRouteTableOutput, error)
+ DeleteTransitGatewayRouteTableWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteTableInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteTableOutput, error)
+ DeleteTransitGatewayRouteTableRequest(*ec2.DeleteTransitGatewayRouteTableInput) (*request.Request, *ec2.DeleteTransitGatewayRouteTableOutput)
+
+ DeleteTransitGatewayRouteTableAnnouncement(*ec2.DeleteTransitGatewayRouteTableAnnouncementInput) (*ec2.DeleteTransitGatewayRouteTableAnnouncementOutput, error)
+ DeleteTransitGatewayRouteTableAnnouncementWithContext(aws.Context, *ec2.DeleteTransitGatewayRouteTableAnnouncementInput, ...request.Option) (*ec2.DeleteTransitGatewayRouteTableAnnouncementOutput, error)
+ DeleteTransitGatewayRouteTableAnnouncementRequest(*ec2.DeleteTransitGatewayRouteTableAnnouncementInput) (*request.Request, *ec2.DeleteTransitGatewayRouteTableAnnouncementOutput)
+
+ DeleteTransitGatewayVpcAttachment(*ec2.DeleteTransitGatewayVpcAttachmentInput) (*ec2.DeleteTransitGatewayVpcAttachmentOutput, error)
+ DeleteTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.DeleteTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.DeleteTransitGatewayVpcAttachmentOutput, error)
+ DeleteTransitGatewayVpcAttachmentRequest(*ec2.DeleteTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.DeleteTransitGatewayVpcAttachmentOutput)
+
+ DeleteVerifiedAccessEndpoint(*ec2.DeleteVerifiedAccessEndpointInput) (*ec2.DeleteVerifiedAccessEndpointOutput, error)
+ DeleteVerifiedAccessEndpointWithContext(aws.Context, *ec2.DeleteVerifiedAccessEndpointInput, ...request.Option) (*ec2.DeleteVerifiedAccessEndpointOutput, error)
+ DeleteVerifiedAccessEndpointRequest(*ec2.DeleteVerifiedAccessEndpointInput) (*request.Request, *ec2.DeleteVerifiedAccessEndpointOutput)
+
+ DeleteVerifiedAccessGroup(*ec2.DeleteVerifiedAccessGroupInput) (*ec2.DeleteVerifiedAccessGroupOutput, error)
+ DeleteVerifiedAccessGroupWithContext(aws.Context, *ec2.DeleteVerifiedAccessGroupInput, ...request.Option) (*ec2.DeleteVerifiedAccessGroupOutput, error)
+ DeleteVerifiedAccessGroupRequest(*ec2.DeleteVerifiedAccessGroupInput) (*request.Request, *ec2.DeleteVerifiedAccessGroupOutput)
+
+ DeleteVerifiedAccessInstance(*ec2.DeleteVerifiedAccessInstanceInput) (*ec2.DeleteVerifiedAccessInstanceOutput, error)
+ DeleteVerifiedAccessInstanceWithContext(aws.Context, *ec2.DeleteVerifiedAccessInstanceInput, ...request.Option) (*ec2.DeleteVerifiedAccessInstanceOutput, error)
+ DeleteVerifiedAccessInstanceRequest(*ec2.DeleteVerifiedAccessInstanceInput) (*request.Request, *ec2.DeleteVerifiedAccessInstanceOutput)
+
+ DeleteVerifiedAccessTrustProvider(*ec2.DeleteVerifiedAccessTrustProviderInput) (*ec2.DeleteVerifiedAccessTrustProviderOutput, error)
+ DeleteVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.DeleteVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.DeleteVerifiedAccessTrustProviderOutput, error)
+ DeleteVerifiedAccessTrustProviderRequest(*ec2.DeleteVerifiedAccessTrustProviderInput) (*request.Request, *ec2.DeleteVerifiedAccessTrustProviderOutput)
+
+ DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
+ DeleteVolumeWithContext(aws.Context, *ec2.DeleteVolumeInput, ...request.Option) (*ec2.DeleteVolumeOutput, error)
+ DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput)
+
+ DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error)
+ DeleteVpcWithContext(aws.Context, *ec2.DeleteVpcInput, ...request.Option) (*ec2.DeleteVpcOutput, error)
+ DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput)
+
+ DeleteVpcEndpointConnectionNotifications(*ec2.DeleteVpcEndpointConnectionNotificationsInput) (*ec2.DeleteVpcEndpointConnectionNotificationsOutput, error)
+ DeleteVpcEndpointConnectionNotificationsWithContext(aws.Context, *ec2.DeleteVpcEndpointConnectionNotificationsInput, ...request.Option) (*ec2.DeleteVpcEndpointConnectionNotificationsOutput, error)
+ DeleteVpcEndpointConnectionNotificationsRequest(*ec2.DeleteVpcEndpointConnectionNotificationsInput) (*request.Request, *ec2.DeleteVpcEndpointConnectionNotificationsOutput)
+
+ DeleteVpcEndpointServiceConfigurations(*ec2.DeleteVpcEndpointServiceConfigurationsInput) (*ec2.DeleteVpcEndpointServiceConfigurationsOutput, error)
+ DeleteVpcEndpointServiceConfigurationsWithContext(aws.Context, *ec2.DeleteVpcEndpointServiceConfigurationsInput, ...request.Option) (*ec2.DeleteVpcEndpointServiceConfigurationsOutput, error)
+ DeleteVpcEndpointServiceConfigurationsRequest(*ec2.DeleteVpcEndpointServiceConfigurationsInput) (*request.Request, *ec2.DeleteVpcEndpointServiceConfigurationsOutput)
+
+ DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error)
+ DeleteVpcEndpointsWithContext(aws.Context, *ec2.DeleteVpcEndpointsInput, ...request.Option) (*ec2.DeleteVpcEndpointsOutput, error)
+ DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput)
+
+ DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error)
+ DeleteVpcPeeringConnectionWithContext(aws.Context, *ec2.DeleteVpcPeeringConnectionInput, ...request.Option) (*ec2.DeleteVpcPeeringConnectionOutput, error)
+ DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput)
+
+ DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error)
+ DeleteVpnConnectionWithContext(aws.Context, *ec2.DeleteVpnConnectionInput, ...request.Option) (*ec2.DeleteVpnConnectionOutput, error)
+ DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput)
+
+ DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error)
+ DeleteVpnConnectionRouteWithContext(aws.Context, *ec2.DeleteVpnConnectionRouteInput, ...request.Option) (*ec2.DeleteVpnConnectionRouteOutput, error)
+ DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput)
+
+ DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error)
+ DeleteVpnGatewayWithContext(aws.Context, *ec2.DeleteVpnGatewayInput, ...request.Option) (*ec2.DeleteVpnGatewayOutput, error)
+ DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput)
+
+ DeprovisionByoipCidr(*ec2.DeprovisionByoipCidrInput) (*ec2.DeprovisionByoipCidrOutput, error)
+ DeprovisionByoipCidrWithContext(aws.Context, *ec2.DeprovisionByoipCidrInput, ...request.Option) (*ec2.DeprovisionByoipCidrOutput, error)
+ DeprovisionByoipCidrRequest(*ec2.DeprovisionByoipCidrInput) (*request.Request, *ec2.DeprovisionByoipCidrOutput)
+
+ DeprovisionIpamPoolCidr(*ec2.DeprovisionIpamPoolCidrInput) (*ec2.DeprovisionIpamPoolCidrOutput, error)
+ DeprovisionIpamPoolCidrWithContext(aws.Context, *ec2.DeprovisionIpamPoolCidrInput, ...request.Option) (*ec2.DeprovisionIpamPoolCidrOutput, error)
+ DeprovisionIpamPoolCidrRequest(*ec2.DeprovisionIpamPoolCidrInput) (*request.Request, *ec2.DeprovisionIpamPoolCidrOutput)
+
+ DeprovisionPublicIpv4PoolCidr(*ec2.DeprovisionPublicIpv4PoolCidrInput) (*ec2.DeprovisionPublicIpv4PoolCidrOutput, error)
+ DeprovisionPublicIpv4PoolCidrWithContext(aws.Context, *ec2.DeprovisionPublicIpv4PoolCidrInput, ...request.Option) (*ec2.DeprovisionPublicIpv4PoolCidrOutput, error)
+ DeprovisionPublicIpv4PoolCidrRequest(*ec2.DeprovisionPublicIpv4PoolCidrInput) (*request.Request, *ec2.DeprovisionPublicIpv4PoolCidrOutput)
+
+ DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error)
+ DeregisterImageWithContext(aws.Context, *ec2.DeregisterImageInput, ...request.Option) (*ec2.DeregisterImageOutput, error)
+ DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput)
+
+ DeregisterInstanceEventNotificationAttributes(*ec2.DeregisterInstanceEventNotificationAttributesInput) (*ec2.DeregisterInstanceEventNotificationAttributesOutput, error)
+ DeregisterInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.DeregisterInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.DeregisterInstanceEventNotificationAttributesOutput, error)
+ DeregisterInstanceEventNotificationAttributesRequest(*ec2.DeregisterInstanceEventNotificationAttributesInput) (*request.Request, *ec2.DeregisterInstanceEventNotificationAttributesOutput)
+
+ DeregisterTransitGatewayMulticastGroupMembers(*ec2.DeregisterTransitGatewayMulticastGroupMembersInput) (*ec2.DeregisterTransitGatewayMulticastGroupMembersOutput, error)
+ DeregisterTransitGatewayMulticastGroupMembersWithContext(aws.Context, *ec2.DeregisterTransitGatewayMulticastGroupMembersInput, ...request.Option) (*ec2.DeregisterTransitGatewayMulticastGroupMembersOutput, error)
+ DeregisterTransitGatewayMulticastGroupMembersRequest(*ec2.DeregisterTransitGatewayMulticastGroupMembersInput) (*request.Request, *ec2.DeregisterTransitGatewayMulticastGroupMembersOutput)
+
+ DeregisterTransitGatewayMulticastGroupSources(*ec2.DeregisterTransitGatewayMulticastGroupSourcesInput) (*ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput, error)
+ DeregisterTransitGatewayMulticastGroupSourcesWithContext(aws.Context, *ec2.DeregisterTransitGatewayMulticastGroupSourcesInput, ...request.Option) (*ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput, error)
+ DeregisterTransitGatewayMulticastGroupSourcesRequest(*ec2.DeregisterTransitGatewayMulticastGroupSourcesInput) (*request.Request, *ec2.DeregisterTransitGatewayMulticastGroupSourcesOutput)
+
+ DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error)
+ DescribeAccountAttributesWithContext(aws.Context, *ec2.DescribeAccountAttributesInput, ...request.Option) (*ec2.DescribeAccountAttributesOutput, error)
+ DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput)
+
+ DescribeAddressTransfers(*ec2.DescribeAddressTransfersInput) (*ec2.DescribeAddressTransfersOutput, error)
+ DescribeAddressTransfersWithContext(aws.Context, *ec2.DescribeAddressTransfersInput, ...request.Option) (*ec2.DescribeAddressTransfersOutput, error)
+ DescribeAddressTransfersRequest(*ec2.DescribeAddressTransfersInput) (*request.Request, *ec2.DescribeAddressTransfersOutput)
+
+ DescribeAddressTransfersPages(*ec2.DescribeAddressTransfersInput, func(*ec2.DescribeAddressTransfersOutput, bool) bool) error
+ DescribeAddressTransfersPagesWithContext(aws.Context, *ec2.DescribeAddressTransfersInput, func(*ec2.DescribeAddressTransfersOutput, bool) bool, ...request.Option) error
+
+ DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error)
+ DescribeAddressesWithContext(aws.Context, *ec2.DescribeAddressesInput, ...request.Option) (*ec2.DescribeAddressesOutput, error)
+ DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput)
+
+ DescribeAddressesAttribute(*ec2.DescribeAddressesAttributeInput) (*ec2.DescribeAddressesAttributeOutput, error)
+ DescribeAddressesAttributeWithContext(aws.Context, *ec2.DescribeAddressesAttributeInput, ...request.Option) (*ec2.DescribeAddressesAttributeOutput, error)
+ DescribeAddressesAttributeRequest(*ec2.DescribeAddressesAttributeInput) (*request.Request, *ec2.DescribeAddressesAttributeOutput)
+
+ DescribeAddressesAttributePages(*ec2.DescribeAddressesAttributeInput, func(*ec2.DescribeAddressesAttributeOutput, bool) bool) error
+ DescribeAddressesAttributePagesWithContext(aws.Context, *ec2.DescribeAddressesAttributeInput, func(*ec2.DescribeAddressesAttributeOutput, bool) bool, ...request.Option) error
+
+ DescribeAggregateIdFormat(*ec2.DescribeAggregateIdFormatInput) (*ec2.DescribeAggregateIdFormatOutput, error)
+ DescribeAggregateIdFormatWithContext(aws.Context, *ec2.DescribeAggregateIdFormatInput, ...request.Option) (*ec2.DescribeAggregateIdFormatOutput, error)
+ DescribeAggregateIdFormatRequest(*ec2.DescribeAggregateIdFormatInput) (*request.Request, *ec2.DescribeAggregateIdFormatOutput)
+
+ DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error)
+ DescribeAvailabilityZonesWithContext(aws.Context, *ec2.DescribeAvailabilityZonesInput, ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error)
+ DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput)
+
+ DescribeAwsNetworkPerformanceMetricSubscriptions(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error)
+ DescribeAwsNetworkPerformanceMetricSubscriptionsWithContext(aws.Context, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, ...request.Option) (*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, error)
+ DescribeAwsNetworkPerformanceMetricSubscriptionsRequest(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput) (*request.Request, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput)
+
+ DescribeAwsNetworkPerformanceMetricSubscriptionsPages(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, func(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool) error
+ DescribeAwsNetworkPerformanceMetricSubscriptionsPagesWithContext(aws.Context, *ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsInput, func(*ec2.DescribeAwsNetworkPerformanceMetricSubscriptionsOutput, bool) bool, ...request.Option) error
+
+ DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error)
+ DescribeBundleTasksWithContext(aws.Context, *ec2.DescribeBundleTasksInput, ...request.Option) (*ec2.DescribeBundleTasksOutput, error)
+ DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput)
+
+ DescribeByoipCidrs(*ec2.DescribeByoipCidrsInput) (*ec2.DescribeByoipCidrsOutput, error)
+ DescribeByoipCidrsWithContext(aws.Context, *ec2.DescribeByoipCidrsInput, ...request.Option) (*ec2.DescribeByoipCidrsOutput, error)
+ DescribeByoipCidrsRequest(*ec2.DescribeByoipCidrsInput) (*request.Request, *ec2.DescribeByoipCidrsOutput)
+
+ DescribeByoipCidrsPages(*ec2.DescribeByoipCidrsInput, func(*ec2.DescribeByoipCidrsOutput, bool) bool) error
+ DescribeByoipCidrsPagesWithContext(aws.Context, *ec2.DescribeByoipCidrsInput, func(*ec2.DescribeByoipCidrsOutput, bool) bool, ...request.Option) error
+
+ DescribeCapacityReservationFleets(*ec2.DescribeCapacityReservationFleetsInput) (*ec2.DescribeCapacityReservationFleetsOutput, error)
+ DescribeCapacityReservationFleetsWithContext(aws.Context, *ec2.DescribeCapacityReservationFleetsInput, ...request.Option) (*ec2.DescribeCapacityReservationFleetsOutput, error)
+ DescribeCapacityReservationFleetsRequest(*ec2.DescribeCapacityReservationFleetsInput) (*request.Request, *ec2.DescribeCapacityReservationFleetsOutput)
+
+ DescribeCapacityReservationFleetsPages(*ec2.DescribeCapacityReservationFleetsInput, func(*ec2.DescribeCapacityReservationFleetsOutput, bool) bool) error
+ DescribeCapacityReservationFleetsPagesWithContext(aws.Context, *ec2.DescribeCapacityReservationFleetsInput, func(*ec2.DescribeCapacityReservationFleetsOutput, bool) bool, ...request.Option) error
+
+ DescribeCapacityReservations(*ec2.DescribeCapacityReservationsInput) (*ec2.DescribeCapacityReservationsOutput, error)
+ DescribeCapacityReservationsWithContext(aws.Context, *ec2.DescribeCapacityReservationsInput, ...request.Option) (*ec2.DescribeCapacityReservationsOutput, error)
+ DescribeCapacityReservationsRequest(*ec2.DescribeCapacityReservationsInput) (*request.Request, *ec2.DescribeCapacityReservationsOutput)
+
+ DescribeCapacityReservationsPages(*ec2.DescribeCapacityReservationsInput, func(*ec2.DescribeCapacityReservationsOutput, bool) bool) error
+ DescribeCapacityReservationsPagesWithContext(aws.Context, *ec2.DescribeCapacityReservationsInput, func(*ec2.DescribeCapacityReservationsOutput, bool) bool, ...request.Option) error
+
+ DescribeCarrierGateways(*ec2.DescribeCarrierGatewaysInput) (*ec2.DescribeCarrierGatewaysOutput, error)
+ DescribeCarrierGatewaysWithContext(aws.Context, *ec2.DescribeCarrierGatewaysInput, ...request.Option) (*ec2.DescribeCarrierGatewaysOutput, error)
+ DescribeCarrierGatewaysRequest(*ec2.DescribeCarrierGatewaysInput) (*request.Request, *ec2.DescribeCarrierGatewaysOutput)
+
+ DescribeCarrierGatewaysPages(*ec2.DescribeCarrierGatewaysInput, func(*ec2.DescribeCarrierGatewaysOutput, bool) bool) error
+ DescribeCarrierGatewaysPagesWithContext(aws.Context, *ec2.DescribeCarrierGatewaysInput, func(*ec2.DescribeCarrierGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error)
+ DescribeClassicLinkInstancesWithContext(aws.Context, *ec2.DescribeClassicLinkInstancesInput, ...request.Option) (*ec2.DescribeClassicLinkInstancesOutput, error)
+ DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput)
+
+ DescribeClassicLinkInstancesPages(*ec2.DescribeClassicLinkInstancesInput, func(*ec2.DescribeClassicLinkInstancesOutput, bool) bool) error
+ DescribeClassicLinkInstancesPagesWithContext(aws.Context, *ec2.DescribeClassicLinkInstancesInput, func(*ec2.DescribeClassicLinkInstancesOutput, bool) bool, ...request.Option) error
+
+ DescribeClientVpnAuthorizationRules(*ec2.DescribeClientVpnAuthorizationRulesInput) (*ec2.DescribeClientVpnAuthorizationRulesOutput, error)
+ DescribeClientVpnAuthorizationRulesWithContext(aws.Context, *ec2.DescribeClientVpnAuthorizationRulesInput, ...request.Option) (*ec2.DescribeClientVpnAuthorizationRulesOutput, error)
+ DescribeClientVpnAuthorizationRulesRequest(*ec2.DescribeClientVpnAuthorizationRulesInput) (*request.Request, *ec2.DescribeClientVpnAuthorizationRulesOutput)
+
+ DescribeClientVpnAuthorizationRulesPages(*ec2.DescribeClientVpnAuthorizationRulesInput, func(*ec2.DescribeClientVpnAuthorizationRulesOutput, bool) bool) error
+ DescribeClientVpnAuthorizationRulesPagesWithContext(aws.Context, *ec2.DescribeClientVpnAuthorizationRulesInput, func(*ec2.DescribeClientVpnAuthorizationRulesOutput, bool) bool, ...request.Option) error
+
+ DescribeClientVpnConnections(*ec2.DescribeClientVpnConnectionsInput) (*ec2.DescribeClientVpnConnectionsOutput, error)
+ DescribeClientVpnConnectionsWithContext(aws.Context, *ec2.DescribeClientVpnConnectionsInput, ...request.Option) (*ec2.DescribeClientVpnConnectionsOutput, error)
+ DescribeClientVpnConnectionsRequest(*ec2.DescribeClientVpnConnectionsInput) (*request.Request, *ec2.DescribeClientVpnConnectionsOutput)
+
+ DescribeClientVpnConnectionsPages(*ec2.DescribeClientVpnConnectionsInput, func(*ec2.DescribeClientVpnConnectionsOutput, bool) bool) error
+ DescribeClientVpnConnectionsPagesWithContext(aws.Context, *ec2.DescribeClientVpnConnectionsInput, func(*ec2.DescribeClientVpnConnectionsOutput, bool) bool, ...request.Option) error
+
+ DescribeClientVpnEndpoints(*ec2.DescribeClientVpnEndpointsInput) (*ec2.DescribeClientVpnEndpointsOutput, error)
+ DescribeClientVpnEndpointsWithContext(aws.Context, *ec2.DescribeClientVpnEndpointsInput, ...request.Option) (*ec2.DescribeClientVpnEndpointsOutput, error)
+ DescribeClientVpnEndpointsRequest(*ec2.DescribeClientVpnEndpointsInput) (*request.Request, *ec2.DescribeClientVpnEndpointsOutput)
+
+ DescribeClientVpnEndpointsPages(*ec2.DescribeClientVpnEndpointsInput, func(*ec2.DescribeClientVpnEndpointsOutput, bool) bool) error
+ DescribeClientVpnEndpointsPagesWithContext(aws.Context, *ec2.DescribeClientVpnEndpointsInput, func(*ec2.DescribeClientVpnEndpointsOutput, bool) bool, ...request.Option) error
+
+ DescribeClientVpnRoutes(*ec2.DescribeClientVpnRoutesInput) (*ec2.DescribeClientVpnRoutesOutput, error)
+ DescribeClientVpnRoutesWithContext(aws.Context, *ec2.DescribeClientVpnRoutesInput, ...request.Option) (*ec2.DescribeClientVpnRoutesOutput, error)
+ DescribeClientVpnRoutesRequest(*ec2.DescribeClientVpnRoutesInput) (*request.Request, *ec2.DescribeClientVpnRoutesOutput)
+
+ DescribeClientVpnRoutesPages(*ec2.DescribeClientVpnRoutesInput, func(*ec2.DescribeClientVpnRoutesOutput, bool) bool) error
+ DescribeClientVpnRoutesPagesWithContext(aws.Context, *ec2.DescribeClientVpnRoutesInput, func(*ec2.DescribeClientVpnRoutesOutput, bool) bool, ...request.Option) error
+
+ DescribeClientVpnTargetNetworks(*ec2.DescribeClientVpnTargetNetworksInput) (*ec2.DescribeClientVpnTargetNetworksOutput, error)
+ DescribeClientVpnTargetNetworksWithContext(aws.Context, *ec2.DescribeClientVpnTargetNetworksInput, ...request.Option) (*ec2.DescribeClientVpnTargetNetworksOutput, error)
+ DescribeClientVpnTargetNetworksRequest(*ec2.DescribeClientVpnTargetNetworksInput) (*request.Request, *ec2.DescribeClientVpnTargetNetworksOutput)
+
+ DescribeClientVpnTargetNetworksPages(*ec2.DescribeClientVpnTargetNetworksInput, func(*ec2.DescribeClientVpnTargetNetworksOutput, bool) bool) error
+ DescribeClientVpnTargetNetworksPagesWithContext(aws.Context, *ec2.DescribeClientVpnTargetNetworksInput, func(*ec2.DescribeClientVpnTargetNetworksOutput, bool) bool, ...request.Option) error
+
+ DescribeCoipPools(*ec2.DescribeCoipPoolsInput) (*ec2.DescribeCoipPoolsOutput, error)
+ DescribeCoipPoolsWithContext(aws.Context, *ec2.DescribeCoipPoolsInput, ...request.Option) (*ec2.DescribeCoipPoolsOutput, error)
+ DescribeCoipPoolsRequest(*ec2.DescribeCoipPoolsInput) (*request.Request, *ec2.DescribeCoipPoolsOutput)
+
+ DescribeCoipPoolsPages(*ec2.DescribeCoipPoolsInput, func(*ec2.DescribeCoipPoolsOutput, bool) bool) error
+ DescribeCoipPoolsPagesWithContext(aws.Context, *ec2.DescribeCoipPoolsInput, func(*ec2.DescribeCoipPoolsOutput, bool) bool, ...request.Option) error
+
+ DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error)
+ DescribeConversionTasksWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.Option) (*ec2.DescribeConversionTasksOutput, error)
+ DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput)
+
+ DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error)
+ DescribeCustomerGatewaysWithContext(aws.Context, *ec2.DescribeCustomerGatewaysInput, ...request.Option) (*ec2.DescribeCustomerGatewaysOutput, error)
+ DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput)
+
+ DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error)
+ DescribeDhcpOptionsWithContext(aws.Context, *ec2.DescribeDhcpOptionsInput, ...request.Option) (*ec2.DescribeDhcpOptionsOutput, error)
+ DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput)
+
+ DescribeDhcpOptionsPages(*ec2.DescribeDhcpOptionsInput, func(*ec2.DescribeDhcpOptionsOutput, bool) bool) error
+ DescribeDhcpOptionsPagesWithContext(aws.Context, *ec2.DescribeDhcpOptionsInput, func(*ec2.DescribeDhcpOptionsOutput, bool) bool, ...request.Option) error
+
+ DescribeEgressOnlyInternetGateways(*ec2.DescribeEgressOnlyInternetGatewaysInput) (*ec2.DescribeEgressOnlyInternetGatewaysOutput, error)
+ DescribeEgressOnlyInternetGatewaysWithContext(aws.Context, *ec2.DescribeEgressOnlyInternetGatewaysInput, ...request.Option) (*ec2.DescribeEgressOnlyInternetGatewaysOutput, error)
+ DescribeEgressOnlyInternetGatewaysRequest(*ec2.DescribeEgressOnlyInternetGatewaysInput) (*request.Request, *ec2.DescribeEgressOnlyInternetGatewaysOutput)
+
+ DescribeEgressOnlyInternetGatewaysPages(*ec2.DescribeEgressOnlyInternetGatewaysInput, func(*ec2.DescribeEgressOnlyInternetGatewaysOutput, bool) bool) error
+ DescribeEgressOnlyInternetGatewaysPagesWithContext(aws.Context, *ec2.DescribeEgressOnlyInternetGatewaysInput, func(*ec2.DescribeEgressOnlyInternetGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeElasticGpus(*ec2.DescribeElasticGpusInput) (*ec2.DescribeElasticGpusOutput, error)
+ DescribeElasticGpusWithContext(aws.Context, *ec2.DescribeElasticGpusInput, ...request.Option) (*ec2.DescribeElasticGpusOutput, error)
+ DescribeElasticGpusRequest(*ec2.DescribeElasticGpusInput) (*request.Request, *ec2.DescribeElasticGpusOutput)
+
+ DescribeExportImageTasks(*ec2.DescribeExportImageTasksInput) (*ec2.DescribeExportImageTasksOutput, error)
+ DescribeExportImageTasksWithContext(aws.Context, *ec2.DescribeExportImageTasksInput, ...request.Option) (*ec2.DescribeExportImageTasksOutput, error)
+ DescribeExportImageTasksRequest(*ec2.DescribeExportImageTasksInput) (*request.Request, *ec2.DescribeExportImageTasksOutput)
+
+ DescribeExportImageTasksPages(*ec2.DescribeExportImageTasksInput, func(*ec2.DescribeExportImageTasksOutput, bool) bool) error
+ DescribeExportImageTasksPagesWithContext(aws.Context, *ec2.DescribeExportImageTasksInput, func(*ec2.DescribeExportImageTasksOutput, bool) bool, ...request.Option) error
+
+ DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error)
+ DescribeExportTasksWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.Option) (*ec2.DescribeExportTasksOutput, error)
+ DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput)
+
+ DescribeFastLaunchImages(*ec2.DescribeFastLaunchImagesInput) (*ec2.DescribeFastLaunchImagesOutput, error)
+ DescribeFastLaunchImagesWithContext(aws.Context, *ec2.DescribeFastLaunchImagesInput, ...request.Option) (*ec2.DescribeFastLaunchImagesOutput, error)
+ DescribeFastLaunchImagesRequest(*ec2.DescribeFastLaunchImagesInput) (*request.Request, *ec2.DescribeFastLaunchImagesOutput)
+
+ DescribeFastLaunchImagesPages(*ec2.DescribeFastLaunchImagesInput, func(*ec2.DescribeFastLaunchImagesOutput, bool) bool) error
+ DescribeFastLaunchImagesPagesWithContext(aws.Context, *ec2.DescribeFastLaunchImagesInput, func(*ec2.DescribeFastLaunchImagesOutput, bool) bool, ...request.Option) error
+
+ DescribeFastSnapshotRestores(*ec2.DescribeFastSnapshotRestoresInput) (*ec2.DescribeFastSnapshotRestoresOutput, error)
+ DescribeFastSnapshotRestoresWithContext(aws.Context, *ec2.DescribeFastSnapshotRestoresInput, ...request.Option) (*ec2.DescribeFastSnapshotRestoresOutput, error)
+ DescribeFastSnapshotRestoresRequest(*ec2.DescribeFastSnapshotRestoresInput) (*request.Request, *ec2.DescribeFastSnapshotRestoresOutput)
+
+ DescribeFastSnapshotRestoresPages(*ec2.DescribeFastSnapshotRestoresInput, func(*ec2.DescribeFastSnapshotRestoresOutput, bool) bool) error
+ DescribeFastSnapshotRestoresPagesWithContext(aws.Context, *ec2.DescribeFastSnapshotRestoresInput, func(*ec2.DescribeFastSnapshotRestoresOutput, bool) bool, ...request.Option) error
+
+ DescribeFleetHistory(*ec2.DescribeFleetHistoryInput) (*ec2.DescribeFleetHistoryOutput, error)
+ DescribeFleetHistoryWithContext(aws.Context, *ec2.DescribeFleetHistoryInput, ...request.Option) (*ec2.DescribeFleetHistoryOutput, error)
+ DescribeFleetHistoryRequest(*ec2.DescribeFleetHistoryInput) (*request.Request, *ec2.DescribeFleetHistoryOutput)
+
+ DescribeFleetInstances(*ec2.DescribeFleetInstancesInput) (*ec2.DescribeFleetInstancesOutput, error)
+ DescribeFleetInstancesWithContext(aws.Context, *ec2.DescribeFleetInstancesInput, ...request.Option) (*ec2.DescribeFleetInstancesOutput, error)
+ DescribeFleetInstancesRequest(*ec2.DescribeFleetInstancesInput) (*request.Request, *ec2.DescribeFleetInstancesOutput)
+
+ DescribeFleets(*ec2.DescribeFleetsInput) (*ec2.DescribeFleetsOutput, error)
+ DescribeFleetsWithContext(aws.Context, *ec2.DescribeFleetsInput, ...request.Option) (*ec2.DescribeFleetsOutput, error)
+ DescribeFleetsRequest(*ec2.DescribeFleetsInput) (*request.Request, *ec2.DescribeFleetsOutput)
+
+ DescribeFleetsPages(*ec2.DescribeFleetsInput, func(*ec2.DescribeFleetsOutput, bool) bool) error
+ DescribeFleetsPagesWithContext(aws.Context, *ec2.DescribeFleetsInput, func(*ec2.DescribeFleetsOutput, bool) bool, ...request.Option) error
+
+ DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error)
+ DescribeFlowLogsWithContext(aws.Context, *ec2.DescribeFlowLogsInput, ...request.Option) (*ec2.DescribeFlowLogsOutput, error)
+ DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput)
+
+ DescribeFlowLogsPages(*ec2.DescribeFlowLogsInput, func(*ec2.DescribeFlowLogsOutput, bool) bool) error
+ DescribeFlowLogsPagesWithContext(aws.Context, *ec2.DescribeFlowLogsInput, func(*ec2.DescribeFlowLogsOutput, bool) bool, ...request.Option) error
+
+ DescribeFpgaImageAttribute(*ec2.DescribeFpgaImageAttributeInput) (*ec2.DescribeFpgaImageAttributeOutput, error)
+ DescribeFpgaImageAttributeWithContext(aws.Context, *ec2.DescribeFpgaImageAttributeInput, ...request.Option) (*ec2.DescribeFpgaImageAttributeOutput, error)
+ DescribeFpgaImageAttributeRequest(*ec2.DescribeFpgaImageAttributeInput) (*request.Request, *ec2.DescribeFpgaImageAttributeOutput)
+
+ DescribeFpgaImages(*ec2.DescribeFpgaImagesInput) (*ec2.DescribeFpgaImagesOutput, error)
+ DescribeFpgaImagesWithContext(aws.Context, *ec2.DescribeFpgaImagesInput, ...request.Option) (*ec2.DescribeFpgaImagesOutput, error)
+ DescribeFpgaImagesRequest(*ec2.DescribeFpgaImagesInput) (*request.Request, *ec2.DescribeFpgaImagesOutput)
+
+ DescribeFpgaImagesPages(*ec2.DescribeFpgaImagesInput, func(*ec2.DescribeFpgaImagesOutput, bool) bool) error
+ DescribeFpgaImagesPagesWithContext(aws.Context, *ec2.DescribeFpgaImagesInput, func(*ec2.DescribeFpgaImagesOutput, bool) bool, ...request.Option) error
+
+ DescribeHostReservationOfferings(*ec2.DescribeHostReservationOfferingsInput) (*ec2.DescribeHostReservationOfferingsOutput, error)
+ DescribeHostReservationOfferingsWithContext(aws.Context, *ec2.DescribeHostReservationOfferingsInput, ...request.Option) (*ec2.DescribeHostReservationOfferingsOutput, error)
+ DescribeHostReservationOfferingsRequest(*ec2.DescribeHostReservationOfferingsInput) (*request.Request, *ec2.DescribeHostReservationOfferingsOutput)
+
+ DescribeHostReservationOfferingsPages(*ec2.DescribeHostReservationOfferingsInput, func(*ec2.DescribeHostReservationOfferingsOutput, bool) bool) error
+ DescribeHostReservationOfferingsPagesWithContext(aws.Context, *ec2.DescribeHostReservationOfferingsInput, func(*ec2.DescribeHostReservationOfferingsOutput, bool) bool, ...request.Option) error
+
+ DescribeHostReservations(*ec2.DescribeHostReservationsInput) (*ec2.DescribeHostReservationsOutput, error)
+ DescribeHostReservationsWithContext(aws.Context, *ec2.DescribeHostReservationsInput, ...request.Option) (*ec2.DescribeHostReservationsOutput, error)
+ DescribeHostReservationsRequest(*ec2.DescribeHostReservationsInput) (*request.Request, *ec2.DescribeHostReservationsOutput)
+
+ DescribeHostReservationsPages(*ec2.DescribeHostReservationsInput, func(*ec2.DescribeHostReservationsOutput, bool) bool) error
+ DescribeHostReservationsPagesWithContext(aws.Context, *ec2.DescribeHostReservationsInput, func(*ec2.DescribeHostReservationsOutput, bool) bool, ...request.Option) error
+
+ DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error)
+ DescribeHostsWithContext(aws.Context, *ec2.DescribeHostsInput, ...request.Option) (*ec2.DescribeHostsOutput, error)
+ DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput)
+
+ DescribeHostsPages(*ec2.DescribeHostsInput, func(*ec2.DescribeHostsOutput, bool) bool) error
+ DescribeHostsPagesWithContext(aws.Context, *ec2.DescribeHostsInput, func(*ec2.DescribeHostsOutput, bool) bool, ...request.Option) error
+
+ DescribeIamInstanceProfileAssociations(*ec2.DescribeIamInstanceProfileAssociationsInput) (*ec2.DescribeIamInstanceProfileAssociationsOutput, error)
+ DescribeIamInstanceProfileAssociationsWithContext(aws.Context, *ec2.DescribeIamInstanceProfileAssociationsInput, ...request.Option) (*ec2.DescribeIamInstanceProfileAssociationsOutput, error)
+ DescribeIamInstanceProfileAssociationsRequest(*ec2.DescribeIamInstanceProfileAssociationsInput) (*request.Request, *ec2.DescribeIamInstanceProfileAssociationsOutput)
+
+ DescribeIamInstanceProfileAssociationsPages(*ec2.DescribeIamInstanceProfileAssociationsInput, func(*ec2.DescribeIamInstanceProfileAssociationsOutput, bool) bool) error
+ DescribeIamInstanceProfileAssociationsPagesWithContext(aws.Context, *ec2.DescribeIamInstanceProfileAssociationsInput, func(*ec2.DescribeIamInstanceProfileAssociationsOutput, bool) bool, ...request.Option) error
+
+ DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error)
+ DescribeIdFormatWithContext(aws.Context, *ec2.DescribeIdFormatInput, ...request.Option) (*ec2.DescribeIdFormatOutput, error)
+ DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput)
+
+ DescribeIdentityIdFormat(*ec2.DescribeIdentityIdFormatInput) (*ec2.DescribeIdentityIdFormatOutput, error)
+ DescribeIdentityIdFormatWithContext(aws.Context, *ec2.DescribeIdentityIdFormatInput, ...request.Option) (*ec2.DescribeIdentityIdFormatOutput, error)
+ DescribeIdentityIdFormatRequest(*ec2.DescribeIdentityIdFormatInput) (*request.Request, *ec2.DescribeIdentityIdFormatOutput)
+
+ DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error)
+ DescribeImageAttributeWithContext(aws.Context, *ec2.DescribeImageAttributeInput, ...request.Option) (*ec2.DescribeImageAttributeOutput, error)
+ DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput)
+
+ DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error)
+ DescribeImagesWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.Option) (*ec2.DescribeImagesOutput, error)
+ DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput)
+
+ DescribeImagesPages(*ec2.DescribeImagesInput, func(*ec2.DescribeImagesOutput, bool) bool) error
+ DescribeImagesPagesWithContext(aws.Context, *ec2.DescribeImagesInput, func(*ec2.DescribeImagesOutput, bool) bool, ...request.Option) error
+
+ DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error)
+ DescribeImportImageTasksWithContext(aws.Context, *ec2.DescribeImportImageTasksInput, ...request.Option) (*ec2.DescribeImportImageTasksOutput, error)
+ DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput)
+
+ DescribeImportImageTasksPages(*ec2.DescribeImportImageTasksInput, func(*ec2.DescribeImportImageTasksOutput, bool) bool) error
+ DescribeImportImageTasksPagesWithContext(aws.Context, *ec2.DescribeImportImageTasksInput, func(*ec2.DescribeImportImageTasksOutput, bool) bool, ...request.Option) error
+
+ DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error)
+ DescribeImportSnapshotTasksWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, ...request.Option) (*ec2.DescribeImportSnapshotTasksOutput, error)
+ DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput)
+
+ DescribeImportSnapshotTasksPages(*ec2.DescribeImportSnapshotTasksInput, func(*ec2.DescribeImportSnapshotTasksOutput, bool) bool) error
+ DescribeImportSnapshotTasksPagesWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, func(*ec2.DescribeImportSnapshotTasksOutput, bool) bool, ...request.Option) error
+
+ DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error)
+ DescribeInstanceAttributeWithContext(aws.Context, *ec2.DescribeInstanceAttributeInput, ...request.Option) (*ec2.DescribeInstanceAttributeOutput, error)
+ DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput)
+
+ DescribeInstanceCreditSpecifications(*ec2.DescribeInstanceCreditSpecificationsInput) (*ec2.DescribeInstanceCreditSpecificationsOutput, error)
+ DescribeInstanceCreditSpecificationsWithContext(aws.Context, *ec2.DescribeInstanceCreditSpecificationsInput, ...request.Option) (*ec2.DescribeInstanceCreditSpecificationsOutput, error)
+ DescribeInstanceCreditSpecificationsRequest(*ec2.DescribeInstanceCreditSpecificationsInput) (*request.Request, *ec2.DescribeInstanceCreditSpecificationsOutput)
+
+ DescribeInstanceCreditSpecificationsPages(*ec2.DescribeInstanceCreditSpecificationsInput, func(*ec2.DescribeInstanceCreditSpecificationsOutput, bool) bool) error
+ DescribeInstanceCreditSpecificationsPagesWithContext(aws.Context, *ec2.DescribeInstanceCreditSpecificationsInput, func(*ec2.DescribeInstanceCreditSpecificationsOutput, bool) bool, ...request.Option) error
+
+ DescribeInstanceEventNotificationAttributes(*ec2.DescribeInstanceEventNotificationAttributesInput) (*ec2.DescribeInstanceEventNotificationAttributesOutput, error)
+ DescribeInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.DescribeInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.DescribeInstanceEventNotificationAttributesOutput, error)
+ DescribeInstanceEventNotificationAttributesRequest(*ec2.DescribeInstanceEventNotificationAttributesInput) (*request.Request, *ec2.DescribeInstanceEventNotificationAttributesOutput)
+
+ DescribeInstanceEventWindows(*ec2.DescribeInstanceEventWindowsInput) (*ec2.DescribeInstanceEventWindowsOutput, error)
+ DescribeInstanceEventWindowsWithContext(aws.Context, *ec2.DescribeInstanceEventWindowsInput, ...request.Option) (*ec2.DescribeInstanceEventWindowsOutput, error)
+ DescribeInstanceEventWindowsRequest(*ec2.DescribeInstanceEventWindowsInput) (*request.Request, *ec2.DescribeInstanceEventWindowsOutput)
+
+ DescribeInstanceEventWindowsPages(*ec2.DescribeInstanceEventWindowsInput, func(*ec2.DescribeInstanceEventWindowsOutput, bool) bool) error
+ DescribeInstanceEventWindowsPagesWithContext(aws.Context, *ec2.DescribeInstanceEventWindowsInput, func(*ec2.DescribeInstanceEventWindowsOutput, bool) bool, ...request.Option) error
+
+ DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error)
+ DescribeInstanceStatusWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.Option) (*ec2.DescribeInstanceStatusOutput, error)
+ DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput)
+
+ DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
+ DescribeInstanceStatusPagesWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool, ...request.Option) error
+
+ DescribeInstanceTypeOfferings(*ec2.DescribeInstanceTypeOfferingsInput) (*ec2.DescribeInstanceTypeOfferingsOutput, error)
+ DescribeInstanceTypeOfferingsWithContext(aws.Context, *ec2.DescribeInstanceTypeOfferingsInput, ...request.Option) (*ec2.DescribeInstanceTypeOfferingsOutput, error)
+ DescribeInstanceTypeOfferingsRequest(*ec2.DescribeInstanceTypeOfferingsInput) (*request.Request, *ec2.DescribeInstanceTypeOfferingsOutput)
+
+ DescribeInstanceTypeOfferingsPages(*ec2.DescribeInstanceTypeOfferingsInput, func(*ec2.DescribeInstanceTypeOfferingsOutput, bool) bool) error
+ DescribeInstanceTypeOfferingsPagesWithContext(aws.Context, *ec2.DescribeInstanceTypeOfferingsInput, func(*ec2.DescribeInstanceTypeOfferingsOutput, bool) bool, ...request.Option) error
+
+ DescribeInstanceTypes(*ec2.DescribeInstanceTypesInput) (*ec2.DescribeInstanceTypesOutput, error)
+ DescribeInstanceTypesWithContext(aws.Context, *ec2.DescribeInstanceTypesInput, ...request.Option) (*ec2.DescribeInstanceTypesOutput, error)
+ DescribeInstanceTypesRequest(*ec2.DescribeInstanceTypesInput) (*request.Request, *ec2.DescribeInstanceTypesOutput)
+
+ DescribeInstanceTypesPages(*ec2.DescribeInstanceTypesInput, func(*ec2.DescribeInstanceTypesOutput, bool) bool) error
+ DescribeInstanceTypesPagesWithContext(aws.Context, *ec2.DescribeInstanceTypesInput, func(*ec2.DescribeInstanceTypesOutput, bool) bool, ...request.Option) error
+
+ DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
+ DescribeInstancesWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.Option) (*ec2.DescribeInstancesOutput, error)
+ DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput)
+
+ DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error
+ DescribeInstancesPagesWithContext(aws.Context, *ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool, ...request.Option) error
+
+ DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error)
+ DescribeInternetGatewaysWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, ...request.Option) (*ec2.DescribeInternetGatewaysOutput, error)
+ DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput)
+
+ DescribeInternetGatewaysPages(*ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool) error
+ DescribeInternetGatewaysPagesWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeIpamPools(*ec2.DescribeIpamPoolsInput) (*ec2.DescribeIpamPoolsOutput, error)
+ DescribeIpamPoolsWithContext(aws.Context, *ec2.DescribeIpamPoolsInput, ...request.Option) (*ec2.DescribeIpamPoolsOutput, error)
+ DescribeIpamPoolsRequest(*ec2.DescribeIpamPoolsInput) (*request.Request, *ec2.DescribeIpamPoolsOutput)
+
+ DescribeIpamPoolsPages(*ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool) error
+ DescribeIpamPoolsPagesWithContext(aws.Context, *ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool, ...request.Option) error
+
+ DescribeIpamResourceDiscoveries(*ec2.DescribeIpamResourceDiscoveriesInput) (*ec2.DescribeIpamResourceDiscoveriesOutput, error)
+ DescribeIpamResourceDiscoveriesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, ...request.Option) (*ec2.DescribeIpamResourceDiscoveriesOutput, error)
+ DescribeIpamResourceDiscoveriesRequest(*ec2.DescribeIpamResourceDiscoveriesInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveriesOutput)
+
+ DescribeIpamResourceDiscoveriesPages(*ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool) error
+ DescribeIpamResourceDiscoveriesPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool, ...request.Option) error
+
+ DescribeIpamResourceDiscoveryAssociations(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error)
+ DescribeIpamResourceDiscoveryAssociationsWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, ...request.Option) (*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error)
+ DescribeIpamResourceDiscoveryAssociationsRequest(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveryAssociationsOutput)
+
+ DescribeIpamResourceDiscoveryAssociationsPages(*ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool) error
+ DescribeIpamResourceDiscoveryAssociationsPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool, ...request.Option) error
+
+ DescribeIpamScopes(*ec2.DescribeIpamScopesInput) (*ec2.DescribeIpamScopesOutput, error)
+ DescribeIpamScopesWithContext(aws.Context, *ec2.DescribeIpamScopesInput, ...request.Option) (*ec2.DescribeIpamScopesOutput, error)
+ DescribeIpamScopesRequest(*ec2.DescribeIpamScopesInput) (*request.Request, *ec2.DescribeIpamScopesOutput)
+
+ DescribeIpamScopesPages(*ec2.DescribeIpamScopesInput, func(*ec2.DescribeIpamScopesOutput, bool) bool) error
+ DescribeIpamScopesPagesWithContext(aws.Context, *ec2.DescribeIpamScopesInput, func(*ec2.DescribeIpamScopesOutput, bool) bool, ...request.Option) error
+
+ DescribeIpams(*ec2.DescribeIpamsInput) (*ec2.DescribeIpamsOutput, error)
+ DescribeIpamsWithContext(aws.Context, *ec2.DescribeIpamsInput, ...request.Option) (*ec2.DescribeIpamsOutput, error)
+ DescribeIpamsRequest(*ec2.DescribeIpamsInput) (*request.Request, *ec2.DescribeIpamsOutput)
+
+ DescribeIpamsPages(*ec2.DescribeIpamsInput, func(*ec2.DescribeIpamsOutput, bool) bool) error
+ DescribeIpamsPagesWithContext(aws.Context, *ec2.DescribeIpamsInput, func(*ec2.DescribeIpamsOutput, bool) bool, ...request.Option) error
+
+ DescribeIpv6Pools(*ec2.DescribeIpv6PoolsInput) (*ec2.DescribeIpv6PoolsOutput, error)
+ DescribeIpv6PoolsWithContext(aws.Context, *ec2.DescribeIpv6PoolsInput, ...request.Option) (*ec2.DescribeIpv6PoolsOutput, error)
+ DescribeIpv6PoolsRequest(*ec2.DescribeIpv6PoolsInput) (*request.Request, *ec2.DescribeIpv6PoolsOutput)
+
+ DescribeIpv6PoolsPages(*ec2.DescribeIpv6PoolsInput, func(*ec2.DescribeIpv6PoolsOutput, bool) bool) error
+ DescribeIpv6PoolsPagesWithContext(aws.Context, *ec2.DescribeIpv6PoolsInput, func(*ec2.DescribeIpv6PoolsOutput, bool) bool, ...request.Option) error
+
+ DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
+ DescribeKeyPairsWithContext(aws.Context, *ec2.DescribeKeyPairsInput, ...request.Option) (*ec2.DescribeKeyPairsOutput, error)
+ DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput)
+
+ DescribeLaunchTemplateVersions(*ec2.DescribeLaunchTemplateVersionsInput) (*ec2.DescribeLaunchTemplateVersionsOutput, error)
+ DescribeLaunchTemplateVersionsWithContext(aws.Context, *ec2.DescribeLaunchTemplateVersionsInput, ...request.Option) (*ec2.DescribeLaunchTemplateVersionsOutput, error)
+ DescribeLaunchTemplateVersionsRequest(*ec2.DescribeLaunchTemplateVersionsInput) (*request.Request, *ec2.DescribeLaunchTemplateVersionsOutput)
+
+ DescribeLaunchTemplateVersionsPages(*ec2.DescribeLaunchTemplateVersionsInput, func(*ec2.DescribeLaunchTemplateVersionsOutput, bool) bool) error
+ DescribeLaunchTemplateVersionsPagesWithContext(aws.Context, *ec2.DescribeLaunchTemplateVersionsInput, func(*ec2.DescribeLaunchTemplateVersionsOutput, bool) bool, ...request.Option) error
+
+ DescribeLaunchTemplates(*ec2.DescribeLaunchTemplatesInput) (*ec2.DescribeLaunchTemplatesOutput, error)
+ DescribeLaunchTemplatesWithContext(aws.Context, *ec2.DescribeLaunchTemplatesInput, ...request.Option) (*ec2.DescribeLaunchTemplatesOutput, error)
+ DescribeLaunchTemplatesRequest(*ec2.DescribeLaunchTemplatesInput) (*request.Request, *ec2.DescribeLaunchTemplatesOutput)
+
+ DescribeLaunchTemplatesPages(*ec2.DescribeLaunchTemplatesInput, func(*ec2.DescribeLaunchTemplatesOutput, bool) bool) error
+ DescribeLaunchTemplatesPagesWithContext(aws.Context, *ec2.DescribeLaunchTemplatesInput, func(*ec2.DescribeLaunchTemplatesOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error)
+ DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error)
+ DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput)
+
+ DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool) error
+ DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGatewayRouteTableVpcAssociations(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) (*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, error)
+ DescribeLocalGatewayRouteTableVpcAssociationsWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, error)
+ DescribeLocalGatewayRouteTableVpcAssociationsRequest(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput)
+
+ DescribeLocalGatewayRouteTableVpcAssociationsPages(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool) error
+ DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput, func(*ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGatewayRouteTables(*ec2.DescribeLocalGatewayRouteTablesInput) (*ec2.DescribeLocalGatewayRouteTablesOutput, error)
+ DescribeLocalGatewayRouteTablesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTablesInput, ...request.Option) (*ec2.DescribeLocalGatewayRouteTablesOutput, error)
+ DescribeLocalGatewayRouteTablesRequest(*ec2.DescribeLocalGatewayRouteTablesInput) (*request.Request, *ec2.DescribeLocalGatewayRouteTablesOutput)
+
+ DescribeLocalGatewayRouteTablesPages(*ec2.DescribeLocalGatewayRouteTablesInput, func(*ec2.DescribeLocalGatewayRouteTablesOutput, bool) bool) error
+ DescribeLocalGatewayRouteTablesPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayRouteTablesInput, func(*ec2.DescribeLocalGatewayRouteTablesOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGatewayVirtualInterfaceGroups(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, error)
+ DescribeLocalGatewayVirtualInterfaceGroupsWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, ...request.Option) (*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, error)
+ DescribeLocalGatewayVirtualInterfaceGroupsRequest(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*request.Request, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput)
+
+ DescribeLocalGatewayVirtualInterfaceGroupsPages(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, func(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool) error
+ DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput, func(*ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGatewayVirtualInterfaces(*ec2.DescribeLocalGatewayVirtualInterfacesInput) (*ec2.DescribeLocalGatewayVirtualInterfacesOutput, error)
+ DescribeLocalGatewayVirtualInterfacesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfacesInput, ...request.Option) (*ec2.DescribeLocalGatewayVirtualInterfacesOutput, error)
+ DescribeLocalGatewayVirtualInterfacesRequest(*ec2.DescribeLocalGatewayVirtualInterfacesInput) (*request.Request, *ec2.DescribeLocalGatewayVirtualInterfacesOutput)
+
+ DescribeLocalGatewayVirtualInterfacesPages(*ec2.DescribeLocalGatewayVirtualInterfacesInput, func(*ec2.DescribeLocalGatewayVirtualInterfacesOutput, bool) bool) error
+ DescribeLocalGatewayVirtualInterfacesPagesWithContext(aws.Context, *ec2.DescribeLocalGatewayVirtualInterfacesInput, func(*ec2.DescribeLocalGatewayVirtualInterfacesOutput, bool) bool, ...request.Option) error
+
+ DescribeLocalGateways(*ec2.DescribeLocalGatewaysInput) (*ec2.DescribeLocalGatewaysOutput, error)
+ DescribeLocalGatewaysWithContext(aws.Context, *ec2.DescribeLocalGatewaysInput, ...request.Option) (*ec2.DescribeLocalGatewaysOutput, error)
+ DescribeLocalGatewaysRequest(*ec2.DescribeLocalGatewaysInput) (*request.Request, *ec2.DescribeLocalGatewaysOutput)
+
+ DescribeLocalGatewaysPages(*ec2.DescribeLocalGatewaysInput, func(*ec2.DescribeLocalGatewaysOutput, bool) bool) error
+ DescribeLocalGatewaysPagesWithContext(aws.Context, *ec2.DescribeLocalGatewaysInput, func(*ec2.DescribeLocalGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeManagedPrefixLists(*ec2.DescribeManagedPrefixListsInput) (*ec2.DescribeManagedPrefixListsOutput, error)
+ DescribeManagedPrefixListsWithContext(aws.Context, *ec2.DescribeManagedPrefixListsInput, ...request.Option) (*ec2.DescribeManagedPrefixListsOutput, error)
+ DescribeManagedPrefixListsRequest(*ec2.DescribeManagedPrefixListsInput) (*request.Request, *ec2.DescribeManagedPrefixListsOutput)
+
+ DescribeManagedPrefixListsPages(*ec2.DescribeManagedPrefixListsInput, func(*ec2.DescribeManagedPrefixListsOutput, bool) bool) error
+ DescribeManagedPrefixListsPagesWithContext(aws.Context, *ec2.DescribeManagedPrefixListsInput, func(*ec2.DescribeManagedPrefixListsOutput, bool) bool, ...request.Option) error
+
+ DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error)
+ DescribeMovingAddressesWithContext(aws.Context, *ec2.DescribeMovingAddressesInput, ...request.Option) (*ec2.DescribeMovingAddressesOutput, error)
+ DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput)
+
+ DescribeMovingAddressesPages(*ec2.DescribeMovingAddressesInput, func(*ec2.DescribeMovingAddressesOutput, bool) bool) error
+ DescribeMovingAddressesPagesWithContext(aws.Context, *ec2.DescribeMovingAddressesInput, func(*ec2.DescribeMovingAddressesOutput, bool) bool, ...request.Option) error
+
+ DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error)
+ DescribeNatGatewaysWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.Option) (*ec2.DescribeNatGatewaysOutput, error)
+ DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput)
+
+ DescribeNatGatewaysPages(*ec2.DescribeNatGatewaysInput, func(*ec2.DescribeNatGatewaysOutput, bool) bool) error
+ DescribeNatGatewaysPagesWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, func(*ec2.DescribeNatGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error)
+ DescribeNetworkAclsWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, ...request.Option) (*ec2.DescribeNetworkAclsOutput, error)
+ DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)
+
+ DescribeNetworkAclsPages(*ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool) error
+ DescribeNetworkAclsPagesWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInsightsAccessScopeAnalyses(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput) (*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, error)
+ DescribeNetworkInsightsAccessScopeAnalysesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, error)
+ DescribeNetworkInsightsAccessScopeAnalysesRequest(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput) (*request.Request, *ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput)
+
+ DescribeNetworkInsightsAccessScopeAnalysesPages(*ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, func(*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, bool) bool) error
+ DescribeNetworkInsightsAccessScopeAnalysesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopeAnalysesInput, func(*ec2.DescribeNetworkInsightsAccessScopeAnalysesOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInsightsAccessScopes(*ec2.DescribeNetworkInsightsAccessScopesInput) (*ec2.DescribeNetworkInsightsAccessScopesOutput, error)
+ DescribeNetworkInsightsAccessScopesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAccessScopesOutput, error)
+ DescribeNetworkInsightsAccessScopesRequest(*ec2.DescribeNetworkInsightsAccessScopesInput) (*request.Request, *ec2.DescribeNetworkInsightsAccessScopesOutput)
+
+ DescribeNetworkInsightsAccessScopesPages(*ec2.DescribeNetworkInsightsAccessScopesInput, func(*ec2.DescribeNetworkInsightsAccessScopesOutput, bool) bool) error
+ DescribeNetworkInsightsAccessScopesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAccessScopesInput, func(*ec2.DescribeNetworkInsightsAccessScopesOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInsightsAnalyses(*ec2.DescribeNetworkInsightsAnalysesInput) (*ec2.DescribeNetworkInsightsAnalysesOutput, error)
+ DescribeNetworkInsightsAnalysesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAnalysesInput, ...request.Option) (*ec2.DescribeNetworkInsightsAnalysesOutput, error)
+ DescribeNetworkInsightsAnalysesRequest(*ec2.DescribeNetworkInsightsAnalysesInput) (*request.Request, *ec2.DescribeNetworkInsightsAnalysesOutput)
+
+ DescribeNetworkInsightsAnalysesPages(*ec2.DescribeNetworkInsightsAnalysesInput, func(*ec2.DescribeNetworkInsightsAnalysesOutput, bool) bool) error
+ DescribeNetworkInsightsAnalysesPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsAnalysesInput, func(*ec2.DescribeNetworkInsightsAnalysesOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInsightsPaths(*ec2.DescribeNetworkInsightsPathsInput) (*ec2.DescribeNetworkInsightsPathsOutput, error)
+ DescribeNetworkInsightsPathsWithContext(aws.Context, *ec2.DescribeNetworkInsightsPathsInput, ...request.Option) (*ec2.DescribeNetworkInsightsPathsOutput, error)
+ DescribeNetworkInsightsPathsRequest(*ec2.DescribeNetworkInsightsPathsInput) (*request.Request, *ec2.DescribeNetworkInsightsPathsOutput)
+
+ DescribeNetworkInsightsPathsPages(*ec2.DescribeNetworkInsightsPathsInput, func(*ec2.DescribeNetworkInsightsPathsOutput, bool) bool) error
+ DescribeNetworkInsightsPathsPagesWithContext(aws.Context, *ec2.DescribeNetworkInsightsPathsInput, func(*ec2.DescribeNetworkInsightsPathsOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
+ DescribeNetworkInterfaceAttributeWithContext(aws.Context, *ec2.DescribeNetworkInterfaceAttributeInput, ...request.Option) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
+ DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput)
+
+ DescribeNetworkInterfacePermissions(*ec2.DescribeNetworkInterfacePermissionsInput) (*ec2.DescribeNetworkInterfacePermissionsOutput, error)
+ DescribeNetworkInterfacePermissionsWithContext(aws.Context, *ec2.DescribeNetworkInterfacePermissionsInput, ...request.Option) (*ec2.DescribeNetworkInterfacePermissionsOutput, error)
+ DescribeNetworkInterfacePermissionsRequest(*ec2.DescribeNetworkInterfacePermissionsInput) (*request.Request, *ec2.DescribeNetworkInterfacePermissionsOutput)
+
+ DescribeNetworkInterfacePermissionsPages(*ec2.DescribeNetworkInterfacePermissionsInput, func(*ec2.DescribeNetworkInterfacePermissionsOutput, bool) bool) error
+ DescribeNetworkInterfacePermissionsPagesWithContext(aws.Context, *ec2.DescribeNetworkInterfacePermissionsInput, func(*ec2.DescribeNetworkInterfacePermissionsOutput, bool) bool, ...request.Option) error
+
+ DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error)
+ DescribeNetworkInterfacesWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, ...request.Option) (*ec2.DescribeNetworkInterfacesOutput, error)
+ DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput)
+
+ DescribeNetworkInterfacesPages(*ec2.DescribeNetworkInterfacesInput, func(*ec2.DescribeNetworkInterfacesOutput, bool) bool) error
+ DescribeNetworkInterfacesPagesWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, func(*ec2.DescribeNetworkInterfacesOutput, bool) bool, ...request.Option) error
+
+ DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error)
+ DescribePlacementGroupsWithContext(aws.Context, *ec2.DescribePlacementGroupsInput, ...request.Option) (*ec2.DescribePlacementGroupsOutput, error)
+ DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput)
+
+ DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error)
+ DescribePrefixListsWithContext(aws.Context, *ec2.DescribePrefixListsInput, ...request.Option) (*ec2.DescribePrefixListsOutput, error)
+ DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput)
+
+ DescribePrefixListsPages(*ec2.DescribePrefixListsInput, func(*ec2.DescribePrefixListsOutput, bool) bool) error
+ DescribePrefixListsPagesWithContext(aws.Context, *ec2.DescribePrefixListsInput, func(*ec2.DescribePrefixListsOutput, bool) bool, ...request.Option) error
+
+ DescribePrincipalIdFormat(*ec2.DescribePrincipalIdFormatInput) (*ec2.DescribePrincipalIdFormatOutput, error)
+ DescribePrincipalIdFormatWithContext(aws.Context, *ec2.DescribePrincipalIdFormatInput, ...request.Option) (*ec2.DescribePrincipalIdFormatOutput, error)
+ DescribePrincipalIdFormatRequest(*ec2.DescribePrincipalIdFormatInput) (*request.Request, *ec2.DescribePrincipalIdFormatOutput)
+
+ DescribePrincipalIdFormatPages(*ec2.DescribePrincipalIdFormatInput, func(*ec2.DescribePrincipalIdFormatOutput, bool) bool) error
+ DescribePrincipalIdFormatPagesWithContext(aws.Context, *ec2.DescribePrincipalIdFormatInput, func(*ec2.DescribePrincipalIdFormatOutput, bool) bool, ...request.Option) error
+
+ DescribePublicIpv4Pools(*ec2.DescribePublicIpv4PoolsInput) (*ec2.DescribePublicIpv4PoolsOutput, error)
+ DescribePublicIpv4PoolsWithContext(aws.Context, *ec2.DescribePublicIpv4PoolsInput, ...request.Option) (*ec2.DescribePublicIpv4PoolsOutput, error)
+ DescribePublicIpv4PoolsRequest(*ec2.DescribePublicIpv4PoolsInput) (*request.Request, *ec2.DescribePublicIpv4PoolsOutput)
+
+ DescribePublicIpv4PoolsPages(*ec2.DescribePublicIpv4PoolsInput, func(*ec2.DescribePublicIpv4PoolsOutput, bool) bool) error
+ DescribePublicIpv4PoolsPagesWithContext(aws.Context, *ec2.DescribePublicIpv4PoolsInput, func(*ec2.DescribePublicIpv4PoolsOutput, bool) bool, ...request.Option) error
+
+ DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error)
+ DescribeRegionsWithContext(aws.Context, *ec2.DescribeRegionsInput, ...request.Option) (*ec2.DescribeRegionsOutput, error)
+ DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput)
+
+ DescribeReplaceRootVolumeTasks(*ec2.DescribeReplaceRootVolumeTasksInput) (*ec2.DescribeReplaceRootVolumeTasksOutput, error)
+ DescribeReplaceRootVolumeTasksWithContext(aws.Context, *ec2.DescribeReplaceRootVolumeTasksInput, ...request.Option) (*ec2.DescribeReplaceRootVolumeTasksOutput, error)
+ DescribeReplaceRootVolumeTasksRequest(*ec2.DescribeReplaceRootVolumeTasksInput) (*request.Request, *ec2.DescribeReplaceRootVolumeTasksOutput)
+
+ DescribeReplaceRootVolumeTasksPages(*ec2.DescribeReplaceRootVolumeTasksInput, func(*ec2.DescribeReplaceRootVolumeTasksOutput, bool) bool) error
+ DescribeReplaceRootVolumeTasksPagesWithContext(aws.Context, *ec2.DescribeReplaceRootVolumeTasksInput, func(*ec2.DescribeReplaceRootVolumeTasksOutput, bool) bool, ...request.Option) error
+
+ DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error)
+ DescribeReservedInstancesWithContext(aws.Context, *ec2.DescribeReservedInstancesInput, ...request.Option) (*ec2.DescribeReservedInstancesOutput, error)
+ DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput)
+
+ DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error)
+ DescribeReservedInstancesListingsWithContext(aws.Context, *ec2.DescribeReservedInstancesListingsInput, ...request.Option) (*ec2.DescribeReservedInstancesListingsOutput, error)
+ DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput)
+
+ DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error)
+ DescribeReservedInstancesModificationsWithContext(aws.Context, *ec2.DescribeReservedInstancesModificationsInput, ...request.Option) (*ec2.DescribeReservedInstancesModificationsOutput, error)
+ DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput)
+
+ DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error
+ DescribeReservedInstancesModificationsPagesWithContext(aws.Context, *ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool, ...request.Option) error
+
+ DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error)
+ DescribeReservedInstancesOfferingsWithContext(aws.Context, *ec2.DescribeReservedInstancesOfferingsInput, ...request.Option) (*ec2.DescribeReservedInstancesOfferingsOutput, error)
+ DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput)
+
+ DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error
+ DescribeReservedInstancesOfferingsPagesWithContext(aws.Context, *ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool, ...request.Option) error
+
+ DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error)
+ DescribeRouteTablesWithContext(aws.Context, *ec2.DescribeRouteTablesInput, ...request.Option) (*ec2.DescribeRouteTablesOutput, error)
+ DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput)
+
+ DescribeRouteTablesPages(*ec2.DescribeRouteTablesInput, func(*ec2.DescribeRouteTablesOutput, bool) bool) error
+ DescribeRouteTablesPagesWithContext(aws.Context, *ec2.DescribeRouteTablesInput, func(*ec2.DescribeRouteTablesOutput, bool) bool, ...request.Option) error
+
+ DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error)
+ DescribeScheduledInstanceAvailabilityWithContext(aws.Context, *ec2.DescribeScheduledInstanceAvailabilityInput, ...request.Option) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error)
+ DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput)
+
+ DescribeScheduledInstanceAvailabilityPages(*ec2.DescribeScheduledInstanceAvailabilityInput, func(*ec2.DescribeScheduledInstanceAvailabilityOutput, bool) bool) error
+ DescribeScheduledInstanceAvailabilityPagesWithContext(aws.Context, *ec2.DescribeScheduledInstanceAvailabilityInput, func(*ec2.DescribeScheduledInstanceAvailabilityOutput, bool) bool, ...request.Option) error
+
+ DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error)
+ DescribeScheduledInstancesWithContext(aws.Context, *ec2.DescribeScheduledInstancesInput, ...request.Option) (*ec2.DescribeScheduledInstancesOutput, error)
+ DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput)
+
+ DescribeScheduledInstancesPages(*ec2.DescribeScheduledInstancesInput, func(*ec2.DescribeScheduledInstancesOutput, bool) bool) error
+ DescribeScheduledInstancesPagesWithContext(aws.Context, *ec2.DescribeScheduledInstancesInput, func(*ec2.DescribeScheduledInstancesOutput, bool) bool, ...request.Option) error
+
+ DescribeSecurityGroupReferences(*ec2.DescribeSecurityGroupReferencesInput) (*ec2.DescribeSecurityGroupReferencesOutput, error)
+ DescribeSecurityGroupReferencesWithContext(aws.Context, *ec2.DescribeSecurityGroupReferencesInput, ...request.Option) (*ec2.DescribeSecurityGroupReferencesOutput, error)
+ DescribeSecurityGroupReferencesRequest(*ec2.DescribeSecurityGroupReferencesInput) (*request.Request, *ec2.DescribeSecurityGroupReferencesOutput)
+
+ DescribeSecurityGroupRules(*ec2.DescribeSecurityGroupRulesInput) (*ec2.DescribeSecurityGroupRulesOutput, error)
+ DescribeSecurityGroupRulesWithContext(aws.Context, *ec2.DescribeSecurityGroupRulesInput, ...request.Option) (*ec2.DescribeSecurityGroupRulesOutput, error)
+ DescribeSecurityGroupRulesRequest(*ec2.DescribeSecurityGroupRulesInput) (*request.Request, *ec2.DescribeSecurityGroupRulesOutput)
+
+ DescribeSecurityGroupRulesPages(*ec2.DescribeSecurityGroupRulesInput, func(*ec2.DescribeSecurityGroupRulesOutput, bool) bool) error
+ DescribeSecurityGroupRulesPagesWithContext(aws.Context, *ec2.DescribeSecurityGroupRulesInput, func(*ec2.DescribeSecurityGroupRulesOutput, bool) bool, ...request.Option) error
+
+ DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error)
+ DescribeSecurityGroupsWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, ...request.Option) (*ec2.DescribeSecurityGroupsOutput, error)
+ DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput)
+
+ DescribeSecurityGroupsPages(*ec2.DescribeSecurityGroupsInput, func(*ec2.DescribeSecurityGroupsOutput, bool) bool) error
+ DescribeSecurityGroupsPagesWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, func(*ec2.DescribeSecurityGroupsOutput, bool) bool, ...request.Option) error
+
+ DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error)
+ DescribeSnapshotAttributeWithContext(aws.Context, *ec2.DescribeSnapshotAttributeInput, ...request.Option) (*ec2.DescribeSnapshotAttributeOutput, error)
+ DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput)
+
+ DescribeSnapshotTierStatus(*ec2.DescribeSnapshotTierStatusInput) (*ec2.DescribeSnapshotTierStatusOutput, error)
+ DescribeSnapshotTierStatusWithContext(aws.Context, *ec2.DescribeSnapshotTierStatusInput, ...request.Option) (*ec2.DescribeSnapshotTierStatusOutput, error)
+ DescribeSnapshotTierStatusRequest(*ec2.DescribeSnapshotTierStatusInput) (*request.Request, *ec2.DescribeSnapshotTierStatusOutput)
+
+ DescribeSnapshotTierStatusPages(*ec2.DescribeSnapshotTierStatusInput, func(*ec2.DescribeSnapshotTierStatusOutput, bool) bool) error
+ DescribeSnapshotTierStatusPagesWithContext(aws.Context, *ec2.DescribeSnapshotTierStatusInput, func(*ec2.DescribeSnapshotTierStatusOutput, bool) bool, ...request.Option) error
+
+ DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error)
+ DescribeSnapshotsWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.Option) (*ec2.DescribeSnapshotsOutput, error)
+ DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput)
+
+ DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error
+ DescribeSnapshotsPagesWithContext(aws.Context, *ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool, ...request.Option) error
+
+ DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error)
+ DescribeSpotDatafeedSubscriptionWithContext(aws.Context, *ec2.DescribeSpotDatafeedSubscriptionInput, ...request.Option) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error)
+ DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput)
+
+ DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error)
+ DescribeSpotFleetInstancesWithContext(aws.Context, *ec2.DescribeSpotFleetInstancesInput, ...request.Option) (*ec2.DescribeSpotFleetInstancesOutput, error)
+ DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput)
+
+ DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error)
+ DescribeSpotFleetRequestHistoryWithContext(aws.Context, *ec2.DescribeSpotFleetRequestHistoryInput, ...request.Option) (*ec2.DescribeSpotFleetRequestHistoryOutput, error)
+ DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput)
+
+ DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error)
+ DescribeSpotFleetRequestsWithContext(aws.Context, *ec2.DescribeSpotFleetRequestsInput, ...request.Option) (*ec2.DescribeSpotFleetRequestsOutput, error)
+ DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput)
+
+ DescribeSpotFleetRequestsPages(*ec2.DescribeSpotFleetRequestsInput, func(*ec2.DescribeSpotFleetRequestsOutput, bool) bool) error
+ DescribeSpotFleetRequestsPagesWithContext(aws.Context, *ec2.DescribeSpotFleetRequestsInput, func(*ec2.DescribeSpotFleetRequestsOutput, bool) bool, ...request.Option) error
+
+ DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error)
+ DescribeSpotInstanceRequestsWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, ...request.Option) (*ec2.DescribeSpotInstanceRequestsOutput, error)
+ DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput)
+
+ DescribeSpotInstanceRequestsPages(*ec2.DescribeSpotInstanceRequestsInput, func(*ec2.DescribeSpotInstanceRequestsOutput, bool) bool) error
+ DescribeSpotInstanceRequestsPagesWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, func(*ec2.DescribeSpotInstanceRequestsOutput, bool) bool, ...request.Option) error
+
+ DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error)
+ DescribeSpotPriceHistoryWithContext(aws.Context, *ec2.DescribeSpotPriceHistoryInput, ...request.Option) (*ec2.DescribeSpotPriceHistoryOutput, error)
+ DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput)
+
+ DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
+ DescribeSpotPriceHistoryPagesWithContext(aws.Context, *ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool, ...request.Option) error
+
+ DescribeStaleSecurityGroups(*ec2.DescribeStaleSecurityGroupsInput) (*ec2.DescribeStaleSecurityGroupsOutput, error)
+ DescribeStaleSecurityGroupsWithContext(aws.Context, *ec2.DescribeStaleSecurityGroupsInput, ...request.Option) (*ec2.DescribeStaleSecurityGroupsOutput, error)
+ DescribeStaleSecurityGroupsRequest(*ec2.DescribeStaleSecurityGroupsInput) (*request.Request, *ec2.DescribeStaleSecurityGroupsOutput)
+
+ DescribeStaleSecurityGroupsPages(*ec2.DescribeStaleSecurityGroupsInput, func(*ec2.DescribeStaleSecurityGroupsOutput, bool) bool) error
+ DescribeStaleSecurityGroupsPagesWithContext(aws.Context, *ec2.DescribeStaleSecurityGroupsInput, func(*ec2.DescribeStaleSecurityGroupsOutput, bool) bool, ...request.Option) error
+
+ DescribeStoreImageTasks(*ec2.DescribeStoreImageTasksInput) (*ec2.DescribeStoreImageTasksOutput, error)
+ DescribeStoreImageTasksWithContext(aws.Context, *ec2.DescribeStoreImageTasksInput, ...request.Option) (*ec2.DescribeStoreImageTasksOutput, error)
+ DescribeStoreImageTasksRequest(*ec2.DescribeStoreImageTasksInput) (*request.Request, *ec2.DescribeStoreImageTasksOutput)
+
+ DescribeStoreImageTasksPages(*ec2.DescribeStoreImageTasksInput, func(*ec2.DescribeStoreImageTasksOutput, bool) bool) error
+ DescribeStoreImageTasksPagesWithContext(aws.Context, *ec2.DescribeStoreImageTasksInput, func(*ec2.DescribeStoreImageTasksOutput, bool) bool, ...request.Option) error
+
+ DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error)
+ DescribeSubnetsWithContext(aws.Context, *ec2.DescribeSubnetsInput, ...request.Option) (*ec2.DescribeSubnetsOutput, error)
+ DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput)
+
+ DescribeSubnetsPages(*ec2.DescribeSubnetsInput, func(*ec2.DescribeSubnetsOutput, bool) bool) error
+ DescribeSubnetsPagesWithContext(aws.Context, *ec2.DescribeSubnetsInput, func(*ec2.DescribeSubnetsOutput, bool) bool, ...request.Option) error
+
+ DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error)
+ DescribeTagsWithContext(aws.Context, *ec2.DescribeTagsInput, ...request.Option) (*ec2.DescribeTagsOutput, error)
+ DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput)
+
+ DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error
+ DescribeTagsPagesWithContext(aws.Context, *ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool, ...request.Option) error
+
+ DescribeTrafficMirrorFilters(*ec2.DescribeTrafficMirrorFiltersInput) (*ec2.DescribeTrafficMirrorFiltersOutput, error)
+ DescribeTrafficMirrorFiltersWithContext(aws.Context, *ec2.DescribeTrafficMirrorFiltersInput, ...request.Option) (*ec2.DescribeTrafficMirrorFiltersOutput, error)
+ DescribeTrafficMirrorFiltersRequest(*ec2.DescribeTrafficMirrorFiltersInput) (*request.Request, *ec2.DescribeTrafficMirrorFiltersOutput)
+
+ DescribeTrafficMirrorFiltersPages(*ec2.DescribeTrafficMirrorFiltersInput, func(*ec2.DescribeTrafficMirrorFiltersOutput, bool) bool) error
+ DescribeTrafficMirrorFiltersPagesWithContext(aws.Context, *ec2.DescribeTrafficMirrorFiltersInput, func(*ec2.DescribeTrafficMirrorFiltersOutput, bool) bool, ...request.Option) error
+
+ DescribeTrafficMirrorSessions(*ec2.DescribeTrafficMirrorSessionsInput) (*ec2.DescribeTrafficMirrorSessionsOutput, error)
+ DescribeTrafficMirrorSessionsWithContext(aws.Context, *ec2.DescribeTrafficMirrorSessionsInput, ...request.Option) (*ec2.DescribeTrafficMirrorSessionsOutput, error)
+ DescribeTrafficMirrorSessionsRequest(*ec2.DescribeTrafficMirrorSessionsInput) (*request.Request, *ec2.DescribeTrafficMirrorSessionsOutput)
+
+ DescribeTrafficMirrorSessionsPages(*ec2.DescribeTrafficMirrorSessionsInput, func(*ec2.DescribeTrafficMirrorSessionsOutput, bool) bool) error
+ DescribeTrafficMirrorSessionsPagesWithContext(aws.Context, *ec2.DescribeTrafficMirrorSessionsInput, func(*ec2.DescribeTrafficMirrorSessionsOutput, bool) bool, ...request.Option) error
+
+ DescribeTrafficMirrorTargets(*ec2.DescribeTrafficMirrorTargetsInput) (*ec2.DescribeTrafficMirrorTargetsOutput, error)
+ DescribeTrafficMirrorTargetsWithContext(aws.Context, *ec2.DescribeTrafficMirrorTargetsInput, ...request.Option) (*ec2.DescribeTrafficMirrorTargetsOutput, error)
+ DescribeTrafficMirrorTargetsRequest(*ec2.DescribeTrafficMirrorTargetsInput) (*request.Request, *ec2.DescribeTrafficMirrorTargetsOutput)
+
+ DescribeTrafficMirrorTargetsPages(*ec2.DescribeTrafficMirrorTargetsInput, func(*ec2.DescribeTrafficMirrorTargetsOutput, bool) bool) error
+ DescribeTrafficMirrorTargetsPagesWithContext(aws.Context, *ec2.DescribeTrafficMirrorTargetsInput, func(*ec2.DescribeTrafficMirrorTargetsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayAttachments(*ec2.DescribeTransitGatewayAttachmentsInput) (*ec2.DescribeTransitGatewayAttachmentsOutput, error)
+ DescribeTransitGatewayAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayAttachmentsOutput, error)
+ DescribeTransitGatewayAttachmentsRequest(*ec2.DescribeTransitGatewayAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayAttachmentsOutput)
+
+ DescribeTransitGatewayAttachmentsPages(*ec2.DescribeTransitGatewayAttachmentsInput, func(*ec2.DescribeTransitGatewayAttachmentsOutput, bool) bool) error
+ DescribeTransitGatewayAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayAttachmentsInput, func(*ec2.DescribeTransitGatewayAttachmentsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayConnectPeers(*ec2.DescribeTransitGatewayConnectPeersInput) (*ec2.DescribeTransitGatewayConnectPeersOutput, error)
+ DescribeTransitGatewayConnectPeersWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectPeersInput, ...request.Option) (*ec2.DescribeTransitGatewayConnectPeersOutput, error)
+ DescribeTransitGatewayConnectPeersRequest(*ec2.DescribeTransitGatewayConnectPeersInput) (*request.Request, *ec2.DescribeTransitGatewayConnectPeersOutput)
+
+ DescribeTransitGatewayConnectPeersPages(*ec2.DescribeTransitGatewayConnectPeersInput, func(*ec2.DescribeTransitGatewayConnectPeersOutput, bool) bool) error
+ DescribeTransitGatewayConnectPeersPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectPeersInput, func(*ec2.DescribeTransitGatewayConnectPeersOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayConnects(*ec2.DescribeTransitGatewayConnectsInput) (*ec2.DescribeTransitGatewayConnectsOutput, error)
+ DescribeTransitGatewayConnectsWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectsInput, ...request.Option) (*ec2.DescribeTransitGatewayConnectsOutput, error)
+ DescribeTransitGatewayConnectsRequest(*ec2.DescribeTransitGatewayConnectsInput) (*request.Request, *ec2.DescribeTransitGatewayConnectsOutput)
+
+ DescribeTransitGatewayConnectsPages(*ec2.DescribeTransitGatewayConnectsInput, func(*ec2.DescribeTransitGatewayConnectsOutput, bool) bool) error
+ DescribeTransitGatewayConnectsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayConnectsInput, func(*ec2.DescribeTransitGatewayConnectsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayMulticastDomains(*ec2.DescribeTransitGatewayMulticastDomainsInput) (*ec2.DescribeTransitGatewayMulticastDomainsOutput, error)
+ DescribeTransitGatewayMulticastDomainsWithContext(aws.Context, *ec2.DescribeTransitGatewayMulticastDomainsInput, ...request.Option) (*ec2.DescribeTransitGatewayMulticastDomainsOutput, error)
+ DescribeTransitGatewayMulticastDomainsRequest(*ec2.DescribeTransitGatewayMulticastDomainsInput) (*request.Request, *ec2.DescribeTransitGatewayMulticastDomainsOutput)
+
+ DescribeTransitGatewayMulticastDomainsPages(*ec2.DescribeTransitGatewayMulticastDomainsInput, func(*ec2.DescribeTransitGatewayMulticastDomainsOutput, bool) bool) error
+ DescribeTransitGatewayMulticastDomainsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayMulticastDomainsInput, func(*ec2.DescribeTransitGatewayMulticastDomainsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayPeeringAttachments(*ec2.DescribeTransitGatewayPeeringAttachmentsInput) (*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, error)
+ DescribeTransitGatewayPeeringAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayPeeringAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, error)
+ DescribeTransitGatewayPeeringAttachmentsRequest(*ec2.DescribeTransitGatewayPeeringAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayPeeringAttachmentsOutput)
+
+ DescribeTransitGatewayPeeringAttachmentsPages(*ec2.DescribeTransitGatewayPeeringAttachmentsInput, func(*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool) error
+ DescribeTransitGatewayPeeringAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayPeeringAttachmentsInput, func(*ec2.DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayPolicyTables(*ec2.DescribeTransitGatewayPolicyTablesInput) (*ec2.DescribeTransitGatewayPolicyTablesOutput, error)
+ DescribeTransitGatewayPolicyTablesWithContext(aws.Context, *ec2.DescribeTransitGatewayPolicyTablesInput, ...request.Option) (*ec2.DescribeTransitGatewayPolicyTablesOutput, error)
+ DescribeTransitGatewayPolicyTablesRequest(*ec2.DescribeTransitGatewayPolicyTablesInput) (*request.Request, *ec2.DescribeTransitGatewayPolicyTablesOutput)
+
+ DescribeTransitGatewayPolicyTablesPages(*ec2.DescribeTransitGatewayPolicyTablesInput, func(*ec2.DescribeTransitGatewayPolicyTablesOutput, bool) bool) error
+ DescribeTransitGatewayPolicyTablesPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayPolicyTablesInput, func(*ec2.DescribeTransitGatewayPolicyTablesOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayRouteTableAnnouncements(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput) (*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, error)
+ DescribeTransitGatewayRouteTableAnnouncementsWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, ...request.Option) (*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, error)
+ DescribeTransitGatewayRouteTableAnnouncementsRequest(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput) (*request.Request, *ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput)
+
+ DescribeTransitGatewayRouteTableAnnouncementsPages(*ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, func(*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, bool) bool) error
+ DescribeTransitGatewayRouteTableAnnouncementsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTableAnnouncementsInput, func(*ec2.DescribeTransitGatewayRouteTableAnnouncementsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayRouteTables(*ec2.DescribeTransitGatewayRouteTablesInput) (*ec2.DescribeTransitGatewayRouteTablesOutput, error)
+ DescribeTransitGatewayRouteTablesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTablesInput, ...request.Option) (*ec2.DescribeTransitGatewayRouteTablesOutput, error)
+ DescribeTransitGatewayRouteTablesRequest(*ec2.DescribeTransitGatewayRouteTablesInput) (*request.Request, *ec2.DescribeTransitGatewayRouteTablesOutput)
+
+ DescribeTransitGatewayRouteTablesPages(*ec2.DescribeTransitGatewayRouteTablesInput, func(*ec2.DescribeTransitGatewayRouteTablesOutput, bool) bool) error
+ DescribeTransitGatewayRouteTablesPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayRouteTablesInput, func(*ec2.DescribeTransitGatewayRouteTablesOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGatewayVpcAttachments(*ec2.DescribeTransitGatewayVpcAttachmentsInput) (*ec2.DescribeTransitGatewayVpcAttachmentsOutput, error)
+ DescribeTransitGatewayVpcAttachmentsWithContext(aws.Context, *ec2.DescribeTransitGatewayVpcAttachmentsInput, ...request.Option) (*ec2.DescribeTransitGatewayVpcAttachmentsOutput, error)
+ DescribeTransitGatewayVpcAttachmentsRequest(*ec2.DescribeTransitGatewayVpcAttachmentsInput) (*request.Request, *ec2.DescribeTransitGatewayVpcAttachmentsOutput)
+
+ DescribeTransitGatewayVpcAttachmentsPages(*ec2.DescribeTransitGatewayVpcAttachmentsInput, func(*ec2.DescribeTransitGatewayVpcAttachmentsOutput, bool) bool) error
+ DescribeTransitGatewayVpcAttachmentsPagesWithContext(aws.Context, *ec2.DescribeTransitGatewayVpcAttachmentsInput, func(*ec2.DescribeTransitGatewayVpcAttachmentsOutput, bool) bool, ...request.Option) error
+
+ DescribeTransitGateways(*ec2.DescribeTransitGatewaysInput) (*ec2.DescribeTransitGatewaysOutput, error)
+ DescribeTransitGatewaysWithContext(aws.Context, *ec2.DescribeTransitGatewaysInput, ...request.Option) (*ec2.DescribeTransitGatewaysOutput, error)
+ DescribeTransitGatewaysRequest(*ec2.DescribeTransitGatewaysInput) (*request.Request, *ec2.DescribeTransitGatewaysOutput)
+
+ DescribeTransitGatewaysPages(*ec2.DescribeTransitGatewaysInput, func(*ec2.DescribeTransitGatewaysOutput, bool) bool) error
+ DescribeTransitGatewaysPagesWithContext(aws.Context, *ec2.DescribeTransitGatewaysInput, func(*ec2.DescribeTransitGatewaysOutput, bool) bool, ...request.Option) error
+
+ DescribeTrunkInterfaceAssociations(*ec2.DescribeTrunkInterfaceAssociationsInput) (*ec2.DescribeTrunkInterfaceAssociationsOutput, error)
+ DescribeTrunkInterfaceAssociationsWithContext(aws.Context, *ec2.DescribeTrunkInterfaceAssociationsInput, ...request.Option) (*ec2.DescribeTrunkInterfaceAssociationsOutput, error)
+ DescribeTrunkInterfaceAssociationsRequest(*ec2.DescribeTrunkInterfaceAssociationsInput) (*request.Request, *ec2.DescribeTrunkInterfaceAssociationsOutput)
+
+ DescribeTrunkInterfaceAssociationsPages(*ec2.DescribeTrunkInterfaceAssociationsInput, func(*ec2.DescribeTrunkInterfaceAssociationsOutput, bool) bool) error
+ DescribeTrunkInterfaceAssociationsPagesWithContext(aws.Context, *ec2.DescribeTrunkInterfaceAssociationsInput, func(*ec2.DescribeTrunkInterfaceAssociationsOutput, bool) bool, ...request.Option) error
+
+ DescribeVerifiedAccessEndpoints(*ec2.DescribeVerifiedAccessEndpointsInput) (*ec2.DescribeVerifiedAccessEndpointsOutput, error)
+ DescribeVerifiedAccessEndpointsWithContext(aws.Context, *ec2.DescribeVerifiedAccessEndpointsInput, ...request.Option) (*ec2.DescribeVerifiedAccessEndpointsOutput, error)
+ DescribeVerifiedAccessEndpointsRequest(*ec2.DescribeVerifiedAccessEndpointsInput) (*request.Request, *ec2.DescribeVerifiedAccessEndpointsOutput)
+
+ DescribeVerifiedAccessEndpointsPages(*ec2.DescribeVerifiedAccessEndpointsInput, func(*ec2.DescribeVerifiedAccessEndpointsOutput, bool) bool) error
+ DescribeVerifiedAccessEndpointsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessEndpointsInput, func(*ec2.DescribeVerifiedAccessEndpointsOutput, bool) bool, ...request.Option) error
+
+ DescribeVerifiedAccessGroups(*ec2.DescribeVerifiedAccessGroupsInput) (*ec2.DescribeVerifiedAccessGroupsOutput, error)
+ DescribeVerifiedAccessGroupsWithContext(aws.Context, *ec2.DescribeVerifiedAccessGroupsInput, ...request.Option) (*ec2.DescribeVerifiedAccessGroupsOutput, error)
+ DescribeVerifiedAccessGroupsRequest(*ec2.DescribeVerifiedAccessGroupsInput) (*request.Request, *ec2.DescribeVerifiedAccessGroupsOutput)
+
+ DescribeVerifiedAccessGroupsPages(*ec2.DescribeVerifiedAccessGroupsInput, func(*ec2.DescribeVerifiedAccessGroupsOutput, bool) bool) error
+ DescribeVerifiedAccessGroupsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessGroupsInput, func(*ec2.DescribeVerifiedAccessGroupsOutput, bool) bool, ...request.Option) error
+
+ DescribeVerifiedAccessInstanceLoggingConfigurations(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error)
+ DescribeVerifiedAccessInstanceLoggingConfigurationsWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, ...request.Option) (*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, error)
+ DescribeVerifiedAccessInstanceLoggingConfigurationsRequest(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput) (*request.Request, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput)
+
+ DescribeVerifiedAccessInstanceLoggingConfigurationsPages(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, func(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool) error
+ DescribeVerifiedAccessInstanceLoggingConfigurationsPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsInput, func(*ec2.DescribeVerifiedAccessInstanceLoggingConfigurationsOutput, bool) bool, ...request.Option) error
+
+ DescribeVerifiedAccessInstances(*ec2.DescribeVerifiedAccessInstancesInput) (*ec2.DescribeVerifiedAccessInstancesOutput, error)
+ DescribeVerifiedAccessInstancesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstancesInput, ...request.Option) (*ec2.DescribeVerifiedAccessInstancesOutput, error)
+ DescribeVerifiedAccessInstancesRequest(*ec2.DescribeVerifiedAccessInstancesInput) (*request.Request, *ec2.DescribeVerifiedAccessInstancesOutput)
+
+ DescribeVerifiedAccessInstancesPages(*ec2.DescribeVerifiedAccessInstancesInput, func(*ec2.DescribeVerifiedAccessInstancesOutput, bool) bool) error
+ DescribeVerifiedAccessInstancesPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessInstancesInput, func(*ec2.DescribeVerifiedAccessInstancesOutput, bool) bool, ...request.Option) error
+
+ DescribeVerifiedAccessTrustProviders(*ec2.DescribeVerifiedAccessTrustProvidersInput) (*ec2.DescribeVerifiedAccessTrustProvidersOutput, error)
+ DescribeVerifiedAccessTrustProvidersWithContext(aws.Context, *ec2.DescribeVerifiedAccessTrustProvidersInput, ...request.Option) (*ec2.DescribeVerifiedAccessTrustProvidersOutput, error)
+ DescribeVerifiedAccessTrustProvidersRequest(*ec2.DescribeVerifiedAccessTrustProvidersInput) (*request.Request, *ec2.DescribeVerifiedAccessTrustProvidersOutput)
+
+ DescribeVerifiedAccessTrustProvidersPages(*ec2.DescribeVerifiedAccessTrustProvidersInput, func(*ec2.DescribeVerifiedAccessTrustProvidersOutput, bool) bool) error
+ DescribeVerifiedAccessTrustProvidersPagesWithContext(aws.Context, *ec2.DescribeVerifiedAccessTrustProvidersInput, func(*ec2.DescribeVerifiedAccessTrustProvidersOutput, bool) bool, ...request.Option) error
+
+ DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error)
+ DescribeVolumeAttributeWithContext(aws.Context, *ec2.DescribeVolumeAttributeInput, ...request.Option) (*ec2.DescribeVolumeAttributeOutput, error)
+ DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput)
+
+ DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error)
+ DescribeVolumeStatusWithContext(aws.Context, *ec2.DescribeVolumeStatusInput, ...request.Option) (*ec2.DescribeVolumeStatusOutput, error)
+ DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput)
+
+ DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error
+ DescribeVolumeStatusPagesWithContext(aws.Context, *ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool, ...request.Option) error
+
+ DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error)
+ DescribeVolumesWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.Option) (*ec2.DescribeVolumesOutput, error)
+ DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput)
+
+ DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error
+ DescribeVolumesPagesWithContext(aws.Context, *ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool, ...request.Option) error
+
+ DescribeVolumesModifications(*ec2.DescribeVolumesModificationsInput) (*ec2.DescribeVolumesModificationsOutput, error)
+ DescribeVolumesModificationsWithContext(aws.Context, *ec2.DescribeVolumesModificationsInput, ...request.Option) (*ec2.DescribeVolumesModificationsOutput, error)
+ DescribeVolumesModificationsRequest(*ec2.DescribeVolumesModificationsInput) (*request.Request, *ec2.DescribeVolumesModificationsOutput)
+
+ DescribeVolumesModificationsPages(*ec2.DescribeVolumesModificationsInput, func(*ec2.DescribeVolumesModificationsOutput, bool) bool) error
+ DescribeVolumesModificationsPagesWithContext(aws.Context, *ec2.DescribeVolumesModificationsInput, func(*ec2.DescribeVolumesModificationsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error)
+ DescribeVpcAttributeWithContext(aws.Context, *ec2.DescribeVpcAttributeInput, ...request.Option) (*ec2.DescribeVpcAttributeOutput, error)
+ DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput)
+
+ DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error)
+ DescribeVpcClassicLinkWithContext(aws.Context, *ec2.DescribeVpcClassicLinkInput, ...request.Option) (*ec2.DescribeVpcClassicLinkOutput, error)
+ DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput)
+
+ DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error)
+ DescribeVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.DescribeVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error)
+ DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput)
+
+ DescribeVpcClassicLinkDnsSupportPages(*ec2.DescribeVpcClassicLinkDnsSupportInput, func(*ec2.DescribeVpcClassicLinkDnsSupportOutput, bool) bool) error
+ DescribeVpcClassicLinkDnsSupportPagesWithContext(aws.Context, *ec2.DescribeVpcClassicLinkDnsSupportInput, func(*ec2.DescribeVpcClassicLinkDnsSupportOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcEndpointConnectionNotifications(*ec2.DescribeVpcEndpointConnectionNotificationsInput) (*ec2.DescribeVpcEndpointConnectionNotificationsOutput, error)
+ DescribeVpcEndpointConnectionNotificationsWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionNotificationsInput, ...request.Option) (*ec2.DescribeVpcEndpointConnectionNotificationsOutput, error)
+ DescribeVpcEndpointConnectionNotificationsRequest(*ec2.DescribeVpcEndpointConnectionNotificationsInput) (*request.Request, *ec2.DescribeVpcEndpointConnectionNotificationsOutput)
+
+ DescribeVpcEndpointConnectionNotificationsPages(*ec2.DescribeVpcEndpointConnectionNotificationsInput, func(*ec2.DescribeVpcEndpointConnectionNotificationsOutput, bool) bool) error
+ DescribeVpcEndpointConnectionNotificationsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionNotificationsInput, func(*ec2.DescribeVpcEndpointConnectionNotificationsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcEndpointConnections(*ec2.DescribeVpcEndpointConnectionsInput) (*ec2.DescribeVpcEndpointConnectionsOutput, error)
+ DescribeVpcEndpointConnectionsWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionsInput, ...request.Option) (*ec2.DescribeVpcEndpointConnectionsOutput, error)
+ DescribeVpcEndpointConnectionsRequest(*ec2.DescribeVpcEndpointConnectionsInput) (*request.Request, *ec2.DescribeVpcEndpointConnectionsOutput)
+
+ DescribeVpcEndpointConnectionsPages(*ec2.DescribeVpcEndpointConnectionsInput, func(*ec2.DescribeVpcEndpointConnectionsOutput, bool) bool) error
+ DescribeVpcEndpointConnectionsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointConnectionsInput, func(*ec2.DescribeVpcEndpointConnectionsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcEndpointServiceConfigurations(*ec2.DescribeVpcEndpointServiceConfigurationsInput) (*ec2.DescribeVpcEndpointServiceConfigurationsOutput, error)
+ DescribeVpcEndpointServiceConfigurationsWithContext(aws.Context, *ec2.DescribeVpcEndpointServiceConfigurationsInput, ...request.Option) (*ec2.DescribeVpcEndpointServiceConfigurationsOutput, error)
+ DescribeVpcEndpointServiceConfigurationsRequest(*ec2.DescribeVpcEndpointServiceConfigurationsInput) (*request.Request, *ec2.DescribeVpcEndpointServiceConfigurationsOutput)
+
+ DescribeVpcEndpointServiceConfigurationsPages(*ec2.DescribeVpcEndpointServiceConfigurationsInput, func(*ec2.DescribeVpcEndpointServiceConfigurationsOutput, bool) bool) error
+ DescribeVpcEndpointServiceConfigurationsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointServiceConfigurationsInput, func(*ec2.DescribeVpcEndpointServiceConfigurationsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcEndpointServicePermissions(*ec2.DescribeVpcEndpointServicePermissionsInput) (*ec2.DescribeVpcEndpointServicePermissionsOutput, error)
+ DescribeVpcEndpointServicePermissionsWithContext(aws.Context, *ec2.DescribeVpcEndpointServicePermissionsInput, ...request.Option) (*ec2.DescribeVpcEndpointServicePermissionsOutput, error)
+ DescribeVpcEndpointServicePermissionsRequest(*ec2.DescribeVpcEndpointServicePermissionsInput) (*request.Request, *ec2.DescribeVpcEndpointServicePermissionsOutput)
+
+ DescribeVpcEndpointServicePermissionsPages(*ec2.DescribeVpcEndpointServicePermissionsInput, func(*ec2.DescribeVpcEndpointServicePermissionsOutput, bool) bool) error
+ DescribeVpcEndpointServicePermissionsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointServicePermissionsInput, func(*ec2.DescribeVpcEndpointServicePermissionsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error)
+ DescribeVpcEndpointServicesWithContext(aws.Context, *ec2.DescribeVpcEndpointServicesInput, ...request.Option) (*ec2.DescribeVpcEndpointServicesOutput, error)
+ DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput)
+
+ DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error)
+ DescribeVpcEndpointsWithContext(aws.Context, *ec2.DescribeVpcEndpointsInput, ...request.Option) (*ec2.DescribeVpcEndpointsOutput, error)
+ DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput)
+
+ DescribeVpcEndpointsPages(*ec2.DescribeVpcEndpointsInput, func(*ec2.DescribeVpcEndpointsOutput, bool) bool) error
+ DescribeVpcEndpointsPagesWithContext(aws.Context, *ec2.DescribeVpcEndpointsInput, func(*ec2.DescribeVpcEndpointsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
+ DescribeVpcPeeringConnectionsWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.Option) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
+ DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput)
+
+ DescribeVpcPeeringConnectionsPages(*ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool) error
+ DescribeVpcPeeringConnectionsPagesWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
+ DescribeVpcsWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.Option) (*ec2.DescribeVpcsOutput, error)
+ DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput)
+
+ DescribeVpcsPages(*ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool) error
+ DescribeVpcsPagesWithContext(aws.Context, *ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool, ...request.Option) error
+
+ DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error)
+ DescribeVpnConnectionsWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.Option) (*ec2.DescribeVpnConnectionsOutput, error)
+ DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput)
+
+ DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error)
+ DescribeVpnGatewaysWithContext(aws.Context, *ec2.DescribeVpnGatewaysInput, ...request.Option) (*ec2.DescribeVpnGatewaysOutput, error)
+ DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput)
+
+ DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error)
+ DetachClassicLinkVpcWithContext(aws.Context, *ec2.DetachClassicLinkVpcInput, ...request.Option) (*ec2.DetachClassicLinkVpcOutput, error)
+ DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput)
+
+ DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error)
+ DetachInternetGatewayWithContext(aws.Context, *ec2.DetachInternetGatewayInput, ...request.Option) (*ec2.DetachInternetGatewayOutput, error)
+ DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput)
+
+ DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error)
+ DetachNetworkInterfaceWithContext(aws.Context, *ec2.DetachNetworkInterfaceInput, ...request.Option) (*ec2.DetachNetworkInterfaceOutput, error)
+ DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput)
+
+ DetachVerifiedAccessTrustProvider(*ec2.DetachVerifiedAccessTrustProviderInput) (*ec2.DetachVerifiedAccessTrustProviderOutput, error)
+ DetachVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.DetachVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.DetachVerifiedAccessTrustProviderOutput, error)
+ DetachVerifiedAccessTrustProviderRequest(*ec2.DetachVerifiedAccessTrustProviderInput) (*request.Request, *ec2.DetachVerifiedAccessTrustProviderOutput)
+
+ DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error)
+ DetachVolumeWithContext(aws.Context, *ec2.DetachVolumeInput, ...request.Option) (*ec2.VolumeAttachment, error)
+ DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
+
+ DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error)
+ DetachVpnGatewayWithContext(aws.Context, *ec2.DetachVpnGatewayInput, ...request.Option) (*ec2.DetachVpnGatewayOutput, error)
+ DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput)
+
+ DisableAddressTransfer(*ec2.DisableAddressTransferInput) (*ec2.DisableAddressTransferOutput, error)
+ DisableAddressTransferWithContext(aws.Context, *ec2.DisableAddressTransferInput, ...request.Option) (*ec2.DisableAddressTransferOutput, error)
+ DisableAddressTransferRequest(*ec2.DisableAddressTransferInput) (*request.Request, *ec2.DisableAddressTransferOutput)
+
+ DisableAwsNetworkPerformanceMetricSubscription(*ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput) (*ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+ DisableAwsNetworkPerformanceMetricSubscriptionWithContext(aws.Context, *ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput, ...request.Option) (*ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+ DisableAwsNetworkPerformanceMetricSubscriptionRequest(*ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput) (*request.Request, *ec2.DisableAwsNetworkPerformanceMetricSubscriptionOutput)
+
+ DisableEbsEncryptionByDefault(*ec2.DisableEbsEncryptionByDefaultInput) (*ec2.DisableEbsEncryptionByDefaultOutput, error)
+ DisableEbsEncryptionByDefaultWithContext(aws.Context, *ec2.DisableEbsEncryptionByDefaultInput, ...request.Option) (*ec2.DisableEbsEncryptionByDefaultOutput, error)
+ DisableEbsEncryptionByDefaultRequest(*ec2.DisableEbsEncryptionByDefaultInput) (*request.Request, *ec2.DisableEbsEncryptionByDefaultOutput)
+
+ DisableFastLaunch(*ec2.DisableFastLaunchInput) (*ec2.DisableFastLaunchOutput, error)
+ DisableFastLaunchWithContext(aws.Context, *ec2.DisableFastLaunchInput, ...request.Option) (*ec2.DisableFastLaunchOutput, error)
+ DisableFastLaunchRequest(*ec2.DisableFastLaunchInput) (*request.Request, *ec2.DisableFastLaunchOutput)
+
+ DisableFastSnapshotRestores(*ec2.DisableFastSnapshotRestoresInput) (*ec2.DisableFastSnapshotRestoresOutput, error)
+ DisableFastSnapshotRestoresWithContext(aws.Context, *ec2.DisableFastSnapshotRestoresInput, ...request.Option) (*ec2.DisableFastSnapshotRestoresOutput, error)
+ DisableFastSnapshotRestoresRequest(*ec2.DisableFastSnapshotRestoresInput) (*request.Request, *ec2.DisableFastSnapshotRestoresOutput)
+
+ DisableImageDeprecation(*ec2.DisableImageDeprecationInput) (*ec2.DisableImageDeprecationOutput, error)
+ DisableImageDeprecationWithContext(aws.Context, *ec2.DisableImageDeprecationInput, ...request.Option) (*ec2.DisableImageDeprecationOutput, error)
+ DisableImageDeprecationRequest(*ec2.DisableImageDeprecationInput) (*request.Request, *ec2.DisableImageDeprecationOutput)
+
+ DisableIpamOrganizationAdminAccount(*ec2.DisableIpamOrganizationAdminAccountInput) (*ec2.DisableIpamOrganizationAdminAccountOutput, error)
+ DisableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.DisableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.DisableIpamOrganizationAdminAccountOutput, error)
+ DisableIpamOrganizationAdminAccountRequest(*ec2.DisableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.DisableIpamOrganizationAdminAccountOutput)
+
+ DisableSerialConsoleAccess(*ec2.DisableSerialConsoleAccessInput) (*ec2.DisableSerialConsoleAccessOutput, error)
+ DisableSerialConsoleAccessWithContext(aws.Context, *ec2.DisableSerialConsoleAccessInput, ...request.Option) (*ec2.DisableSerialConsoleAccessOutput, error)
+ DisableSerialConsoleAccessRequest(*ec2.DisableSerialConsoleAccessInput) (*request.Request, *ec2.DisableSerialConsoleAccessOutput)
+
+ DisableTransitGatewayRouteTablePropagation(*ec2.DisableTransitGatewayRouteTablePropagationInput) (*ec2.DisableTransitGatewayRouteTablePropagationOutput, error)
+ DisableTransitGatewayRouteTablePropagationWithContext(aws.Context, *ec2.DisableTransitGatewayRouteTablePropagationInput, ...request.Option) (*ec2.DisableTransitGatewayRouteTablePropagationOutput, error)
+ DisableTransitGatewayRouteTablePropagationRequest(*ec2.DisableTransitGatewayRouteTablePropagationInput) (*request.Request, *ec2.DisableTransitGatewayRouteTablePropagationOutput)
+
+ DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error)
+ DisableVgwRoutePropagationWithContext(aws.Context, *ec2.DisableVgwRoutePropagationInput, ...request.Option) (*ec2.DisableVgwRoutePropagationOutput, error)
+ DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput)
+
+ DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
+ DisableVpcClassicLinkWithContext(aws.Context, *ec2.DisableVpcClassicLinkInput, ...request.Option) (*ec2.DisableVpcClassicLinkOutput, error)
+ DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput)
+
+ DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
+ DisableVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.DisableVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
+ DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput)
+
+ DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
+ DisassociateAddressWithContext(aws.Context, *ec2.DisassociateAddressInput, ...request.Option) (*ec2.DisassociateAddressOutput, error)
+ DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
+
+ DisassociateClientVpnTargetNetwork(*ec2.DisassociateClientVpnTargetNetworkInput) (*ec2.DisassociateClientVpnTargetNetworkOutput, error)
+ DisassociateClientVpnTargetNetworkWithContext(aws.Context, *ec2.DisassociateClientVpnTargetNetworkInput, ...request.Option) (*ec2.DisassociateClientVpnTargetNetworkOutput, error)
+ DisassociateClientVpnTargetNetworkRequest(*ec2.DisassociateClientVpnTargetNetworkInput) (*request.Request, *ec2.DisassociateClientVpnTargetNetworkOutput)
+
+ DisassociateEnclaveCertificateIamRole(*ec2.DisassociateEnclaveCertificateIamRoleInput) (*ec2.DisassociateEnclaveCertificateIamRoleOutput, error)
+ DisassociateEnclaveCertificateIamRoleWithContext(aws.Context, *ec2.DisassociateEnclaveCertificateIamRoleInput, ...request.Option) (*ec2.DisassociateEnclaveCertificateIamRoleOutput, error)
+ DisassociateEnclaveCertificateIamRoleRequest(*ec2.DisassociateEnclaveCertificateIamRoleInput) (*request.Request, *ec2.DisassociateEnclaveCertificateIamRoleOutput)
+
+ DisassociateIamInstanceProfile(*ec2.DisassociateIamInstanceProfileInput) (*ec2.DisassociateIamInstanceProfileOutput, error)
+ DisassociateIamInstanceProfileWithContext(aws.Context, *ec2.DisassociateIamInstanceProfileInput, ...request.Option) (*ec2.DisassociateIamInstanceProfileOutput, error)
+ DisassociateIamInstanceProfileRequest(*ec2.DisassociateIamInstanceProfileInput) (*request.Request, *ec2.DisassociateIamInstanceProfileOutput)
+
+ DisassociateInstanceEventWindow(*ec2.DisassociateInstanceEventWindowInput) (*ec2.DisassociateInstanceEventWindowOutput, error)
+ DisassociateInstanceEventWindowWithContext(aws.Context, *ec2.DisassociateInstanceEventWindowInput, ...request.Option) (*ec2.DisassociateInstanceEventWindowOutput, error)
+ DisassociateInstanceEventWindowRequest(*ec2.DisassociateInstanceEventWindowInput) (*request.Request, *ec2.DisassociateInstanceEventWindowOutput)
+
+ DisassociateIpamResourceDiscovery(*ec2.DisassociateIpamResourceDiscoveryInput) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+ DisassociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.DisassociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+ DisassociateIpamResourceDiscoveryRequest(*ec2.DisassociateIpamResourceDiscoveryInput) (*request.Request, *ec2.DisassociateIpamResourceDiscoveryOutput)
+
+ DisassociateNatGatewayAddress(*ec2.DisassociateNatGatewayAddressInput) (*ec2.DisassociateNatGatewayAddressOutput, error)
+ DisassociateNatGatewayAddressWithContext(aws.Context, *ec2.DisassociateNatGatewayAddressInput, ...request.Option) (*ec2.DisassociateNatGatewayAddressOutput, error)
+ DisassociateNatGatewayAddressRequest(*ec2.DisassociateNatGatewayAddressInput) (*request.Request, *ec2.DisassociateNatGatewayAddressOutput)
+
+ DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
+ DisassociateRouteTableWithContext(aws.Context, *ec2.DisassociateRouteTableInput, ...request.Option) (*ec2.DisassociateRouteTableOutput, error)
+ DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
+
+ DisassociateSubnetCidrBlock(*ec2.DisassociateSubnetCidrBlockInput) (*ec2.DisassociateSubnetCidrBlockOutput, error)
+ DisassociateSubnetCidrBlockWithContext(aws.Context, *ec2.DisassociateSubnetCidrBlockInput, ...request.Option) (*ec2.DisassociateSubnetCidrBlockOutput, error)
+ DisassociateSubnetCidrBlockRequest(*ec2.DisassociateSubnetCidrBlockInput) (*request.Request, *ec2.DisassociateSubnetCidrBlockOutput)
+
+ DisassociateTransitGatewayMulticastDomain(*ec2.DisassociateTransitGatewayMulticastDomainInput) (*ec2.DisassociateTransitGatewayMulticastDomainOutput, error)
+ DisassociateTransitGatewayMulticastDomainWithContext(aws.Context, *ec2.DisassociateTransitGatewayMulticastDomainInput, ...request.Option) (*ec2.DisassociateTransitGatewayMulticastDomainOutput, error)
+ DisassociateTransitGatewayMulticastDomainRequest(*ec2.DisassociateTransitGatewayMulticastDomainInput) (*request.Request, *ec2.DisassociateTransitGatewayMulticastDomainOutput)
+
+ DisassociateTransitGatewayPolicyTable(*ec2.DisassociateTransitGatewayPolicyTableInput) (*ec2.DisassociateTransitGatewayPolicyTableOutput, error)
+ DisassociateTransitGatewayPolicyTableWithContext(aws.Context, *ec2.DisassociateTransitGatewayPolicyTableInput, ...request.Option) (*ec2.DisassociateTransitGatewayPolicyTableOutput, error)
+ DisassociateTransitGatewayPolicyTableRequest(*ec2.DisassociateTransitGatewayPolicyTableInput) (*request.Request, *ec2.DisassociateTransitGatewayPolicyTableOutput)
+
+ DisassociateTransitGatewayRouteTable(*ec2.DisassociateTransitGatewayRouteTableInput) (*ec2.DisassociateTransitGatewayRouteTableOutput, error)
+ DisassociateTransitGatewayRouteTableWithContext(aws.Context, *ec2.DisassociateTransitGatewayRouteTableInput, ...request.Option) (*ec2.DisassociateTransitGatewayRouteTableOutput, error)
+ DisassociateTransitGatewayRouteTableRequest(*ec2.DisassociateTransitGatewayRouteTableInput) (*request.Request, *ec2.DisassociateTransitGatewayRouteTableOutput)
+
+ DisassociateTrunkInterface(*ec2.DisassociateTrunkInterfaceInput) (*ec2.DisassociateTrunkInterfaceOutput, error)
+ DisassociateTrunkInterfaceWithContext(aws.Context, *ec2.DisassociateTrunkInterfaceInput, ...request.Option) (*ec2.DisassociateTrunkInterfaceOutput, error)
+ DisassociateTrunkInterfaceRequest(*ec2.DisassociateTrunkInterfaceInput) (*request.Request, *ec2.DisassociateTrunkInterfaceOutput)
+
+ DisassociateVpcCidrBlock(*ec2.DisassociateVpcCidrBlockInput) (*ec2.DisassociateVpcCidrBlockOutput, error)
+ DisassociateVpcCidrBlockWithContext(aws.Context, *ec2.DisassociateVpcCidrBlockInput, ...request.Option) (*ec2.DisassociateVpcCidrBlockOutput, error)
+ DisassociateVpcCidrBlockRequest(*ec2.DisassociateVpcCidrBlockInput) (*request.Request, *ec2.DisassociateVpcCidrBlockOutput)
+
+ EnableAddressTransfer(*ec2.EnableAddressTransferInput) (*ec2.EnableAddressTransferOutput, error)
+ EnableAddressTransferWithContext(aws.Context, *ec2.EnableAddressTransferInput, ...request.Option) (*ec2.EnableAddressTransferOutput, error)
+ EnableAddressTransferRequest(*ec2.EnableAddressTransferInput) (*request.Request, *ec2.EnableAddressTransferOutput)
+
+ EnableAwsNetworkPerformanceMetricSubscription(*ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput) (*ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+ EnableAwsNetworkPerformanceMetricSubscriptionWithContext(aws.Context, *ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput, ...request.Option) (*ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput, error)
+ EnableAwsNetworkPerformanceMetricSubscriptionRequest(*ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput) (*request.Request, *ec2.EnableAwsNetworkPerformanceMetricSubscriptionOutput)
+
+ EnableEbsEncryptionByDefault(*ec2.EnableEbsEncryptionByDefaultInput) (*ec2.EnableEbsEncryptionByDefaultOutput, error)
+ EnableEbsEncryptionByDefaultWithContext(aws.Context, *ec2.EnableEbsEncryptionByDefaultInput, ...request.Option) (*ec2.EnableEbsEncryptionByDefaultOutput, error)
+ EnableEbsEncryptionByDefaultRequest(*ec2.EnableEbsEncryptionByDefaultInput) (*request.Request, *ec2.EnableEbsEncryptionByDefaultOutput)
+
+ EnableFastLaunch(*ec2.EnableFastLaunchInput) (*ec2.EnableFastLaunchOutput, error)
+ EnableFastLaunchWithContext(aws.Context, *ec2.EnableFastLaunchInput, ...request.Option) (*ec2.EnableFastLaunchOutput, error)
+ EnableFastLaunchRequest(*ec2.EnableFastLaunchInput) (*request.Request, *ec2.EnableFastLaunchOutput)
+
+ EnableFastSnapshotRestores(*ec2.EnableFastSnapshotRestoresInput) (*ec2.EnableFastSnapshotRestoresOutput, error)
+ EnableFastSnapshotRestoresWithContext(aws.Context, *ec2.EnableFastSnapshotRestoresInput, ...request.Option) (*ec2.EnableFastSnapshotRestoresOutput, error)
+ EnableFastSnapshotRestoresRequest(*ec2.EnableFastSnapshotRestoresInput) (*request.Request, *ec2.EnableFastSnapshotRestoresOutput)
+
+ EnableImageDeprecation(*ec2.EnableImageDeprecationInput) (*ec2.EnableImageDeprecationOutput, error)
+ EnableImageDeprecationWithContext(aws.Context, *ec2.EnableImageDeprecationInput, ...request.Option) (*ec2.EnableImageDeprecationOutput, error)
+ EnableImageDeprecationRequest(*ec2.EnableImageDeprecationInput) (*request.Request, *ec2.EnableImageDeprecationOutput)
+
+ EnableIpamOrganizationAdminAccount(*ec2.EnableIpamOrganizationAdminAccountInput) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
+ EnableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.EnableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
+ EnableIpamOrganizationAdminAccountRequest(*ec2.EnableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.EnableIpamOrganizationAdminAccountOutput)
+
+ EnableReachabilityAnalyzerOrganizationSharing(*ec2.EnableReachabilityAnalyzerOrganizationSharingInput) (*ec2.EnableReachabilityAnalyzerOrganizationSharingOutput, error)
+ EnableReachabilityAnalyzerOrganizationSharingWithContext(aws.Context, *ec2.EnableReachabilityAnalyzerOrganizationSharingInput, ...request.Option) (*ec2.EnableReachabilityAnalyzerOrganizationSharingOutput, error)
+ EnableReachabilityAnalyzerOrganizationSharingRequest(*ec2.EnableReachabilityAnalyzerOrganizationSharingInput) (*request.Request, *ec2.EnableReachabilityAnalyzerOrganizationSharingOutput)
+
+ EnableSerialConsoleAccess(*ec2.EnableSerialConsoleAccessInput) (*ec2.EnableSerialConsoleAccessOutput, error)
+ EnableSerialConsoleAccessWithContext(aws.Context, *ec2.EnableSerialConsoleAccessInput, ...request.Option) (*ec2.EnableSerialConsoleAccessOutput, error)
+ EnableSerialConsoleAccessRequest(*ec2.EnableSerialConsoleAccessInput) (*request.Request, *ec2.EnableSerialConsoleAccessOutput)
+
+ EnableTransitGatewayRouteTablePropagation(*ec2.EnableTransitGatewayRouteTablePropagationInput) (*ec2.EnableTransitGatewayRouteTablePropagationOutput, error)
+ EnableTransitGatewayRouteTablePropagationWithContext(aws.Context, *ec2.EnableTransitGatewayRouteTablePropagationInput, ...request.Option) (*ec2.EnableTransitGatewayRouteTablePropagationOutput, error)
+ EnableTransitGatewayRouteTablePropagationRequest(*ec2.EnableTransitGatewayRouteTablePropagationInput) (*request.Request, *ec2.EnableTransitGatewayRouteTablePropagationOutput)
+
+ EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error)
+ EnableVgwRoutePropagationWithContext(aws.Context, *ec2.EnableVgwRoutePropagationInput, ...request.Option) (*ec2.EnableVgwRoutePropagationOutput, error)
+ EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput)
+
+ EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error)
+ EnableVolumeIOWithContext(aws.Context, *ec2.EnableVolumeIOInput, ...request.Option) (*ec2.EnableVolumeIOOutput, error)
+ EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput)
+
+ EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
+ EnableVpcClassicLinkWithContext(aws.Context, *ec2.EnableVpcClassicLinkInput, ...request.Option) (*ec2.EnableVpcClassicLinkOutput, error)
+ EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput)
+
+ EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
+ EnableVpcClassicLinkDnsSupportWithContext(aws.Context, *ec2.EnableVpcClassicLinkDnsSupportInput, ...request.Option) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
+ EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput)
+
+ ExportClientVpnClientCertificateRevocationList(*ec2.ExportClientVpnClientCertificateRevocationListInput) (*ec2.ExportClientVpnClientCertificateRevocationListOutput, error)
+ ExportClientVpnClientCertificateRevocationListWithContext(aws.Context, *ec2.ExportClientVpnClientCertificateRevocationListInput, ...request.Option) (*ec2.ExportClientVpnClientCertificateRevocationListOutput, error)
+ ExportClientVpnClientCertificateRevocationListRequest(*ec2.ExportClientVpnClientCertificateRevocationListInput) (*request.Request, *ec2.ExportClientVpnClientCertificateRevocationListOutput)
+
+ ExportClientVpnClientConfiguration(*ec2.ExportClientVpnClientConfigurationInput) (*ec2.ExportClientVpnClientConfigurationOutput, error)
+ ExportClientVpnClientConfigurationWithContext(aws.Context, *ec2.ExportClientVpnClientConfigurationInput, ...request.Option) (*ec2.ExportClientVpnClientConfigurationOutput, error)
+ ExportClientVpnClientConfigurationRequest(*ec2.ExportClientVpnClientConfigurationInput) (*request.Request, *ec2.ExportClientVpnClientConfigurationOutput)
+
+ ExportImage(*ec2.ExportImageInput) (*ec2.ExportImageOutput, error)
+ ExportImageWithContext(aws.Context, *ec2.ExportImageInput, ...request.Option) (*ec2.ExportImageOutput, error)
+ ExportImageRequest(*ec2.ExportImageInput) (*request.Request, *ec2.ExportImageOutput)
+
+ ExportTransitGatewayRoutes(*ec2.ExportTransitGatewayRoutesInput) (*ec2.ExportTransitGatewayRoutesOutput, error)
+ ExportTransitGatewayRoutesWithContext(aws.Context, *ec2.ExportTransitGatewayRoutesInput, ...request.Option) (*ec2.ExportTransitGatewayRoutesOutput, error)
+ ExportTransitGatewayRoutesRequest(*ec2.ExportTransitGatewayRoutesInput) (*request.Request, *ec2.ExportTransitGatewayRoutesOutput)
+
+ GetAssociatedEnclaveCertificateIamRoles(*ec2.GetAssociatedEnclaveCertificateIamRolesInput) (*ec2.GetAssociatedEnclaveCertificateIamRolesOutput, error)
+ GetAssociatedEnclaveCertificateIamRolesWithContext(aws.Context, *ec2.GetAssociatedEnclaveCertificateIamRolesInput, ...request.Option) (*ec2.GetAssociatedEnclaveCertificateIamRolesOutput, error)
+ GetAssociatedEnclaveCertificateIamRolesRequest(*ec2.GetAssociatedEnclaveCertificateIamRolesInput) (*request.Request, *ec2.GetAssociatedEnclaveCertificateIamRolesOutput)
+
+ GetAssociatedIpv6PoolCidrs(*ec2.GetAssociatedIpv6PoolCidrsInput) (*ec2.GetAssociatedIpv6PoolCidrsOutput, error)
+ GetAssociatedIpv6PoolCidrsWithContext(aws.Context, *ec2.GetAssociatedIpv6PoolCidrsInput, ...request.Option) (*ec2.GetAssociatedIpv6PoolCidrsOutput, error)
+ GetAssociatedIpv6PoolCidrsRequest(*ec2.GetAssociatedIpv6PoolCidrsInput) (*request.Request, *ec2.GetAssociatedIpv6PoolCidrsOutput)
+
+ GetAssociatedIpv6PoolCidrsPages(*ec2.GetAssociatedIpv6PoolCidrsInput, func(*ec2.GetAssociatedIpv6PoolCidrsOutput, bool) bool) error
+ GetAssociatedIpv6PoolCidrsPagesWithContext(aws.Context, *ec2.GetAssociatedIpv6PoolCidrsInput, func(*ec2.GetAssociatedIpv6PoolCidrsOutput, bool) bool, ...request.Option) error
+
+ GetAwsNetworkPerformanceData(*ec2.GetAwsNetworkPerformanceDataInput) (*ec2.GetAwsNetworkPerformanceDataOutput, error)
+ GetAwsNetworkPerformanceDataWithContext(aws.Context, *ec2.GetAwsNetworkPerformanceDataInput, ...request.Option) (*ec2.GetAwsNetworkPerformanceDataOutput, error)
+ GetAwsNetworkPerformanceDataRequest(*ec2.GetAwsNetworkPerformanceDataInput) (*request.Request, *ec2.GetAwsNetworkPerformanceDataOutput)
+
+ GetAwsNetworkPerformanceDataPages(*ec2.GetAwsNetworkPerformanceDataInput, func(*ec2.GetAwsNetworkPerformanceDataOutput, bool) bool) error
+ GetAwsNetworkPerformanceDataPagesWithContext(aws.Context, *ec2.GetAwsNetworkPerformanceDataInput, func(*ec2.GetAwsNetworkPerformanceDataOutput, bool) bool, ...request.Option) error
+
+ GetCapacityReservationUsage(*ec2.GetCapacityReservationUsageInput) (*ec2.GetCapacityReservationUsageOutput, error)
+ GetCapacityReservationUsageWithContext(aws.Context, *ec2.GetCapacityReservationUsageInput, ...request.Option) (*ec2.GetCapacityReservationUsageOutput, error)
+ GetCapacityReservationUsageRequest(*ec2.GetCapacityReservationUsageInput) (*request.Request, *ec2.GetCapacityReservationUsageOutput)
+
+ GetCoipPoolUsage(*ec2.GetCoipPoolUsageInput) (*ec2.GetCoipPoolUsageOutput, error)
+ GetCoipPoolUsageWithContext(aws.Context, *ec2.GetCoipPoolUsageInput, ...request.Option) (*ec2.GetCoipPoolUsageOutput, error)
+ GetCoipPoolUsageRequest(*ec2.GetCoipPoolUsageInput) (*request.Request, *ec2.GetCoipPoolUsageOutput)
+
+ GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error)
+ GetConsoleOutputWithContext(aws.Context, *ec2.GetConsoleOutputInput, ...request.Option) (*ec2.GetConsoleOutputOutput, error)
+ GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput)
+
+ GetConsoleScreenshot(*ec2.GetConsoleScreenshotInput) (*ec2.GetConsoleScreenshotOutput, error)
+ GetConsoleScreenshotWithContext(aws.Context, *ec2.GetConsoleScreenshotInput, ...request.Option) (*ec2.GetConsoleScreenshotOutput, error)
+ GetConsoleScreenshotRequest(*ec2.GetConsoleScreenshotInput) (*request.Request, *ec2.GetConsoleScreenshotOutput)
+
+ GetDefaultCreditSpecification(*ec2.GetDefaultCreditSpecificationInput) (*ec2.GetDefaultCreditSpecificationOutput, error)
+ GetDefaultCreditSpecificationWithContext(aws.Context, *ec2.GetDefaultCreditSpecificationInput, ...request.Option) (*ec2.GetDefaultCreditSpecificationOutput, error)
+ GetDefaultCreditSpecificationRequest(*ec2.GetDefaultCreditSpecificationInput) (*request.Request, *ec2.GetDefaultCreditSpecificationOutput)
+
+ GetEbsDefaultKmsKeyId(*ec2.GetEbsDefaultKmsKeyIdInput) (*ec2.GetEbsDefaultKmsKeyIdOutput, error)
+ GetEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.GetEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.GetEbsDefaultKmsKeyIdOutput, error)
+ GetEbsDefaultKmsKeyIdRequest(*ec2.GetEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.GetEbsDefaultKmsKeyIdOutput)
+
+ GetEbsEncryptionByDefault(*ec2.GetEbsEncryptionByDefaultInput) (*ec2.GetEbsEncryptionByDefaultOutput, error)
+ GetEbsEncryptionByDefaultWithContext(aws.Context, *ec2.GetEbsEncryptionByDefaultInput, ...request.Option) (*ec2.GetEbsEncryptionByDefaultOutput, error)
+ GetEbsEncryptionByDefaultRequest(*ec2.GetEbsEncryptionByDefaultInput) (*request.Request, *ec2.GetEbsEncryptionByDefaultOutput)
+
+ GetFlowLogsIntegrationTemplate(*ec2.GetFlowLogsIntegrationTemplateInput) (*ec2.GetFlowLogsIntegrationTemplateOutput, error)
+ GetFlowLogsIntegrationTemplateWithContext(aws.Context, *ec2.GetFlowLogsIntegrationTemplateInput, ...request.Option) (*ec2.GetFlowLogsIntegrationTemplateOutput, error)
+ GetFlowLogsIntegrationTemplateRequest(*ec2.GetFlowLogsIntegrationTemplateInput) (*request.Request, *ec2.GetFlowLogsIntegrationTemplateOutput)
+
+ GetGroupsForCapacityReservation(*ec2.GetGroupsForCapacityReservationInput) (*ec2.GetGroupsForCapacityReservationOutput, error)
+ GetGroupsForCapacityReservationWithContext(aws.Context, *ec2.GetGroupsForCapacityReservationInput, ...request.Option) (*ec2.GetGroupsForCapacityReservationOutput, error)
+ GetGroupsForCapacityReservationRequest(*ec2.GetGroupsForCapacityReservationInput) (*request.Request, *ec2.GetGroupsForCapacityReservationOutput)
+
+ GetGroupsForCapacityReservationPages(*ec2.GetGroupsForCapacityReservationInput, func(*ec2.GetGroupsForCapacityReservationOutput, bool) bool) error
+ GetGroupsForCapacityReservationPagesWithContext(aws.Context, *ec2.GetGroupsForCapacityReservationInput, func(*ec2.GetGroupsForCapacityReservationOutput, bool) bool, ...request.Option) error
+
+ GetHostReservationPurchasePreview(*ec2.GetHostReservationPurchasePreviewInput) (*ec2.GetHostReservationPurchasePreviewOutput, error)
+ GetHostReservationPurchasePreviewWithContext(aws.Context, *ec2.GetHostReservationPurchasePreviewInput, ...request.Option) (*ec2.GetHostReservationPurchasePreviewOutput, error)
+ GetHostReservationPurchasePreviewRequest(*ec2.GetHostReservationPurchasePreviewInput) (*request.Request, *ec2.GetHostReservationPurchasePreviewOutput)
+
+ GetInstanceTypesFromInstanceRequirements(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error)
+ GetInstanceTypesFromInstanceRequirementsWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, ...request.Option) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error)
+ GetInstanceTypesFromInstanceRequirementsRequest(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*request.Request, *ec2.GetInstanceTypesFromInstanceRequirementsOutput)
+
+ GetInstanceTypesFromInstanceRequirementsPages(*ec2.GetInstanceTypesFromInstanceRequirementsInput, func(*ec2.GetInstanceTypesFromInstanceRequirementsOutput, bool) bool) error
+ GetInstanceTypesFromInstanceRequirementsPagesWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, func(*ec2.GetInstanceTypesFromInstanceRequirementsOutput, bool) bool, ...request.Option) error
+
+ GetInstanceUefiData(*ec2.GetInstanceUefiDataInput) (*ec2.GetInstanceUefiDataOutput, error)
+ GetInstanceUefiDataWithContext(aws.Context, *ec2.GetInstanceUefiDataInput, ...request.Option) (*ec2.GetInstanceUefiDataOutput, error)
+ GetInstanceUefiDataRequest(*ec2.GetInstanceUefiDataInput) (*request.Request, *ec2.GetInstanceUefiDataOutput)
+
+ GetIpamAddressHistory(*ec2.GetIpamAddressHistoryInput) (*ec2.GetIpamAddressHistoryOutput, error)
+ GetIpamAddressHistoryWithContext(aws.Context, *ec2.GetIpamAddressHistoryInput, ...request.Option) (*ec2.GetIpamAddressHistoryOutput, error)
+ GetIpamAddressHistoryRequest(*ec2.GetIpamAddressHistoryInput) (*request.Request, *ec2.GetIpamAddressHistoryOutput)
+
+ GetIpamAddressHistoryPages(*ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool) error
+ GetIpamAddressHistoryPagesWithContext(aws.Context, *ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool, ...request.Option) error
+
+ GetIpamDiscoveredAccounts(*ec2.GetIpamDiscoveredAccountsInput) (*ec2.GetIpamDiscoveredAccountsOutput, error)
+ GetIpamDiscoveredAccountsWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, ...request.Option) (*ec2.GetIpamDiscoveredAccountsOutput, error)
+ GetIpamDiscoveredAccountsRequest(*ec2.GetIpamDiscoveredAccountsInput) (*request.Request, *ec2.GetIpamDiscoveredAccountsOutput)
+
+ GetIpamDiscoveredAccountsPages(*ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool) error
+ GetIpamDiscoveredAccountsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool, ...request.Option) error
+
+ GetIpamDiscoveredResourceCidrs(*ec2.GetIpamDiscoveredResourceCidrsInput) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error)
+ GetIpamDiscoveredResourceCidrsWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, ...request.Option) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error)
+ GetIpamDiscoveredResourceCidrsRequest(*ec2.GetIpamDiscoveredResourceCidrsInput) (*request.Request, *ec2.GetIpamDiscoveredResourceCidrsOutput)
+
+ GetIpamDiscoveredResourceCidrsPages(*ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool) error
+ GetIpamDiscoveredResourceCidrsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool, ...request.Option) error
+
+ GetIpamPoolAllocations(*ec2.GetIpamPoolAllocationsInput) (*ec2.GetIpamPoolAllocationsOutput, error)
+ GetIpamPoolAllocationsWithContext(aws.Context, *ec2.GetIpamPoolAllocationsInput, ...request.Option) (*ec2.GetIpamPoolAllocationsOutput, error)
+ GetIpamPoolAllocationsRequest(*ec2.GetIpamPoolAllocationsInput) (*request.Request, *ec2.GetIpamPoolAllocationsOutput)
+
+ GetIpamPoolAllocationsPages(*ec2.GetIpamPoolAllocationsInput, func(*ec2.GetIpamPoolAllocationsOutput, bool) bool) error
+ GetIpamPoolAllocationsPagesWithContext(aws.Context, *ec2.GetIpamPoolAllocationsInput, func(*ec2.GetIpamPoolAllocationsOutput, bool) bool, ...request.Option) error
+
+ GetIpamPoolCidrs(*ec2.GetIpamPoolCidrsInput) (*ec2.GetIpamPoolCidrsOutput, error)
+ GetIpamPoolCidrsWithContext(aws.Context, *ec2.GetIpamPoolCidrsInput, ...request.Option) (*ec2.GetIpamPoolCidrsOutput, error)
+ GetIpamPoolCidrsRequest(*ec2.GetIpamPoolCidrsInput) (*request.Request, *ec2.GetIpamPoolCidrsOutput)
+
+ GetIpamPoolCidrsPages(*ec2.GetIpamPoolCidrsInput, func(*ec2.GetIpamPoolCidrsOutput, bool) bool) error
+ GetIpamPoolCidrsPagesWithContext(aws.Context, *ec2.GetIpamPoolCidrsInput, func(*ec2.GetIpamPoolCidrsOutput, bool) bool, ...request.Option) error
+
+ GetIpamResourceCidrs(*ec2.GetIpamResourceCidrsInput) (*ec2.GetIpamResourceCidrsOutput, error)
+ GetIpamResourceCidrsWithContext(aws.Context, *ec2.GetIpamResourceCidrsInput, ...request.Option) (*ec2.GetIpamResourceCidrsOutput, error)
+ GetIpamResourceCidrsRequest(*ec2.GetIpamResourceCidrsInput) (*request.Request, *ec2.GetIpamResourceCidrsOutput)
+
+ GetIpamResourceCidrsPages(*ec2.GetIpamResourceCidrsInput, func(*ec2.GetIpamResourceCidrsOutput, bool) bool) error
+ GetIpamResourceCidrsPagesWithContext(aws.Context, *ec2.GetIpamResourceCidrsInput, func(*ec2.GetIpamResourceCidrsOutput, bool) bool, ...request.Option) error
+
+ GetLaunchTemplateData(*ec2.GetLaunchTemplateDataInput) (*ec2.GetLaunchTemplateDataOutput, error)
+ GetLaunchTemplateDataWithContext(aws.Context, *ec2.GetLaunchTemplateDataInput, ...request.Option) (*ec2.GetLaunchTemplateDataOutput, error)
+ GetLaunchTemplateDataRequest(*ec2.GetLaunchTemplateDataInput) (*request.Request, *ec2.GetLaunchTemplateDataOutput)
+
+ GetManagedPrefixListAssociations(*ec2.GetManagedPrefixListAssociationsInput) (*ec2.GetManagedPrefixListAssociationsOutput, error)
+ GetManagedPrefixListAssociationsWithContext(aws.Context, *ec2.GetManagedPrefixListAssociationsInput, ...request.Option) (*ec2.GetManagedPrefixListAssociationsOutput, error)
+ GetManagedPrefixListAssociationsRequest(*ec2.GetManagedPrefixListAssociationsInput) (*request.Request, *ec2.GetManagedPrefixListAssociationsOutput)
+
+ GetManagedPrefixListAssociationsPages(*ec2.GetManagedPrefixListAssociationsInput, func(*ec2.GetManagedPrefixListAssociationsOutput, bool) bool) error
+ GetManagedPrefixListAssociationsPagesWithContext(aws.Context, *ec2.GetManagedPrefixListAssociationsInput, func(*ec2.GetManagedPrefixListAssociationsOutput, bool) bool, ...request.Option) error
+
+ GetManagedPrefixListEntries(*ec2.GetManagedPrefixListEntriesInput) (*ec2.GetManagedPrefixListEntriesOutput, error)
+ GetManagedPrefixListEntriesWithContext(aws.Context, *ec2.GetManagedPrefixListEntriesInput, ...request.Option) (*ec2.GetManagedPrefixListEntriesOutput, error)
+ GetManagedPrefixListEntriesRequest(*ec2.GetManagedPrefixListEntriesInput) (*request.Request, *ec2.GetManagedPrefixListEntriesOutput)
+
+ GetManagedPrefixListEntriesPages(*ec2.GetManagedPrefixListEntriesInput, func(*ec2.GetManagedPrefixListEntriesOutput, bool) bool) error
+ GetManagedPrefixListEntriesPagesWithContext(aws.Context, *ec2.GetManagedPrefixListEntriesInput, func(*ec2.GetManagedPrefixListEntriesOutput, bool) bool, ...request.Option) error
+
+ GetNetworkInsightsAccessScopeAnalysisFindings(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput) (*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, error)
+ GetNetworkInsightsAccessScopeAnalysisFindingsWithContext(aws.Context, *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput, ...request.Option) (*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput, error)
+ GetNetworkInsightsAccessScopeAnalysisFindingsRequest(*ec2.GetNetworkInsightsAccessScopeAnalysisFindingsInput) (*request.Request, *ec2.GetNetworkInsightsAccessScopeAnalysisFindingsOutput)
+
+ GetNetworkInsightsAccessScopeContent(*ec2.GetNetworkInsightsAccessScopeContentInput) (*ec2.GetNetworkInsightsAccessScopeContentOutput, error)
+ GetNetworkInsightsAccessScopeContentWithContext(aws.Context, *ec2.GetNetworkInsightsAccessScopeContentInput, ...request.Option) (*ec2.GetNetworkInsightsAccessScopeContentOutput, error)
+ GetNetworkInsightsAccessScopeContentRequest(*ec2.GetNetworkInsightsAccessScopeContentInput) (*request.Request, *ec2.GetNetworkInsightsAccessScopeContentOutput)
+
+ GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error)
+ GetPasswordDataWithContext(aws.Context, *ec2.GetPasswordDataInput, ...request.Option) (*ec2.GetPasswordDataOutput, error)
+ GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput)
+
+ GetReservedInstancesExchangeQuote(*ec2.GetReservedInstancesExchangeQuoteInput) (*ec2.GetReservedInstancesExchangeQuoteOutput, error)
+ GetReservedInstancesExchangeQuoteWithContext(aws.Context, *ec2.GetReservedInstancesExchangeQuoteInput, ...request.Option) (*ec2.GetReservedInstancesExchangeQuoteOutput, error)
+ GetReservedInstancesExchangeQuoteRequest(*ec2.GetReservedInstancesExchangeQuoteInput) (*request.Request, *ec2.GetReservedInstancesExchangeQuoteOutput)
+
+ GetSerialConsoleAccessStatus(*ec2.GetSerialConsoleAccessStatusInput) (*ec2.GetSerialConsoleAccessStatusOutput, error)
+ GetSerialConsoleAccessStatusWithContext(aws.Context, *ec2.GetSerialConsoleAccessStatusInput, ...request.Option) (*ec2.GetSerialConsoleAccessStatusOutput, error)
+ GetSerialConsoleAccessStatusRequest(*ec2.GetSerialConsoleAccessStatusInput) (*request.Request, *ec2.GetSerialConsoleAccessStatusOutput)
+
+ GetSpotPlacementScores(*ec2.GetSpotPlacementScoresInput) (*ec2.GetSpotPlacementScoresOutput, error)
+ GetSpotPlacementScoresWithContext(aws.Context, *ec2.GetSpotPlacementScoresInput, ...request.Option) (*ec2.GetSpotPlacementScoresOutput, error)
+ GetSpotPlacementScoresRequest(*ec2.GetSpotPlacementScoresInput) (*request.Request, *ec2.GetSpotPlacementScoresOutput)
+
+ GetSpotPlacementScoresPages(*ec2.GetSpotPlacementScoresInput, func(*ec2.GetSpotPlacementScoresOutput, bool) bool) error
+ GetSpotPlacementScoresPagesWithContext(aws.Context, *ec2.GetSpotPlacementScoresInput, func(*ec2.GetSpotPlacementScoresOutput, bool) bool, ...request.Option) error
+
+ GetSubnetCidrReservations(*ec2.GetSubnetCidrReservationsInput) (*ec2.GetSubnetCidrReservationsOutput, error)
+ GetSubnetCidrReservationsWithContext(aws.Context, *ec2.GetSubnetCidrReservationsInput, ...request.Option) (*ec2.GetSubnetCidrReservationsOutput, error)
+ GetSubnetCidrReservationsRequest(*ec2.GetSubnetCidrReservationsInput) (*request.Request, *ec2.GetSubnetCidrReservationsOutput)
+
+ GetTransitGatewayAttachmentPropagations(*ec2.GetTransitGatewayAttachmentPropagationsInput) (*ec2.GetTransitGatewayAttachmentPropagationsOutput, error)
+ GetTransitGatewayAttachmentPropagationsWithContext(aws.Context, *ec2.GetTransitGatewayAttachmentPropagationsInput, ...request.Option) (*ec2.GetTransitGatewayAttachmentPropagationsOutput, error)
+ GetTransitGatewayAttachmentPropagationsRequest(*ec2.GetTransitGatewayAttachmentPropagationsInput) (*request.Request, *ec2.GetTransitGatewayAttachmentPropagationsOutput)
+
+ GetTransitGatewayAttachmentPropagationsPages(*ec2.GetTransitGatewayAttachmentPropagationsInput, func(*ec2.GetTransitGatewayAttachmentPropagationsOutput, bool) bool) error
+ GetTransitGatewayAttachmentPropagationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayAttachmentPropagationsInput, func(*ec2.GetTransitGatewayAttachmentPropagationsOutput, bool) bool, ...request.Option) error
+
+ GetTransitGatewayMulticastDomainAssociations(*ec2.GetTransitGatewayMulticastDomainAssociationsInput) (*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, error)
+ GetTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, error)
+ GetTransitGatewayMulticastDomainAssociationsRequest(*ec2.GetTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.GetTransitGatewayMulticastDomainAssociationsOutput)
+
+ GetTransitGatewayMulticastDomainAssociationsPages(*ec2.GetTransitGatewayMulticastDomainAssociationsInput, func(*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool) error
+ GetTransitGatewayMulticastDomainAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayMulticastDomainAssociationsInput, func(*ec2.GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool, ...request.Option) error
+
+ GetTransitGatewayPolicyTableAssociations(*ec2.GetTransitGatewayPolicyTableAssociationsInput) (*ec2.GetTransitGatewayPolicyTableAssociationsOutput, error)
+ GetTransitGatewayPolicyTableAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayPolicyTableAssociationsOutput, error)
+ GetTransitGatewayPolicyTableAssociationsRequest(*ec2.GetTransitGatewayPolicyTableAssociationsInput) (*request.Request, *ec2.GetTransitGatewayPolicyTableAssociationsOutput)
+
+ GetTransitGatewayPolicyTableAssociationsPages(*ec2.GetTransitGatewayPolicyTableAssociationsInput, func(*ec2.GetTransitGatewayPolicyTableAssociationsOutput, bool) bool) error
+ GetTransitGatewayPolicyTableAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableAssociationsInput, func(*ec2.GetTransitGatewayPolicyTableAssociationsOutput, bool) bool, ...request.Option) error
+
+ GetTransitGatewayPolicyTableEntries(*ec2.GetTransitGatewayPolicyTableEntriesInput) (*ec2.GetTransitGatewayPolicyTableEntriesOutput, error)
+ GetTransitGatewayPolicyTableEntriesWithContext(aws.Context, *ec2.GetTransitGatewayPolicyTableEntriesInput, ...request.Option) (*ec2.GetTransitGatewayPolicyTableEntriesOutput, error)
+ GetTransitGatewayPolicyTableEntriesRequest(*ec2.GetTransitGatewayPolicyTableEntriesInput) (*request.Request, *ec2.GetTransitGatewayPolicyTableEntriesOutput)
+
+ GetTransitGatewayPrefixListReferences(*ec2.GetTransitGatewayPrefixListReferencesInput) (*ec2.GetTransitGatewayPrefixListReferencesOutput, error)
+ GetTransitGatewayPrefixListReferencesWithContext(aws.Context, *ec2.GetTransitGatewayPrefixListReferencesInput, ...request.Option) (*ec2.GetTransitGatewayPrefixListReferencesOutput, error)
+ GetTransitGatewayPrefixListReferencesRequest(*ec2.GetTransitGatewayPrefixListReferencesInput) (*request.Request, *ec2.GetTransitGatewayPrefixListReferencesOutput)
+
+ GetTransitGatewayPrefixListReferencesPages(*ec2.GetTransitGatewayPrefixListReferencesInput, func(*ec2.GetTransitGatewayPrefixListReferencesOutput, bool) bool) error
+ GetTransitGatewayPrefixListReferencesPagesWithContext(aws.Context, *ec2.GetTransitGatewayPrefixListReferencesInput, func(*ec2.GetTransitGatewayPrefixListReferencesOutput, bool) bool, ...request.Option) error
+
+ GetTransitGatewayRouteTableAssociations(*ec2.GetTransitGatewayRouteTableAssociationsInput) (*ec2.GetTransitGatewayRouteTableAssociationsOutput, error)
+ GetTransitGatewayRouteTableAssociationsWithContext(aws.Context, *ec2.GetTransitGatewayRouteTableAssociationsInput, ...request.Option) (*ec2.GetTransitGatewayRouteTableAssociationsOutput, error)
+ GetTransitGatewayRouteTableAssociationsRequest(*ec2.GetTransitGatewayRouteTableAssociationsInput) (*request.Request, *ec2.GetTransitGatewayRouteTableAssociationsOutput)
+
+ GetTransitGatewayRouteTableAssociationsPages(*ec2.GetTransitGatewayRouteTableAssociationsInput, func(*ec2.GetTransitGatewayRouteTableAssociationsOutput, bool) bool) error
+ GetTransitGatewayRouteTableAssociationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayRouteTableAssociationsInput, func(*ec2.GetTransitGatewayRouteTableAssociationsOutput, bool) bool, ...request.Option) error
+
+ GetTransitGatewayRouteTablePropagations(*ec2.GetTransitGatewayRouteTablePropagationsInput) (*ec2.GetTransitGatewayRouteTablePropagationsOutput, error)
+ GetTransitGatewayRouteTablePropagationsWithContext(aws.Context, *ec2.GetTransitGatewayRouteTablePropagationsInput, ...request.Option) (*ec2.GetTransitGatewayRouteTablePropagationsOutput, error)
+ GetTransitGatewayRouteTablePropagationsRequest(*ec2.GetTransitGatewayRouteTablePropagationsInput) (*request.Request, *ec2.GetTransitGatewayRouteTablePropagationsOutput)
+
+ GetTransitGatewayRouteTablePropagationsPages(*ec2.GetTransitGatewayRouteTablePropagationsInput, func(*ec2.GetTransitGatewayRouteTablePropagationsOutput, bool) bool) error
+ GetTransitGatewayRouteTablePropagationsPagesWithContext(aws.Context, *ec2.GetTransitGatewayRouteTablePropagationsInput, func(*ec2.GetTransitGatewayRouteTablePropagationsOutput, bool) bool, ...request.Option) error
+
+ GetVerifiedAccessEndpointPolicy(*ec2.GetVerifiedAccessEndpointPolicyInput) (*ec2.GetVerifiedAccessEndpointPolicyOutput, error)
+ GetVerifiedAccessEndpointPolicyWithContext(aws.Context, *ec2.GetVerifiedAccessEndpointPolicyInput, ...request.Option) (*ec2.GetVerifiedAccessEndpointPolicyOutput, error)
+ GetVerifiedAccessEndpointPolicyRequest(*ec2.GetVerifiedAccessEndpointPolicyInput) (*request.Request, *ec2.GetVerifiedAccessEndpointPolicyOutput)
+
+ GetVerifiedAccessGroupPolicy(*ec2.GetVerifiedAccessGroupPolicyInput) (*ec2.GetVerifiedAccessGroupPolicyOutput, error)
+ GetVerifiedAccessGroupPolicyWithContext(aws.Context, *ec2.GetVerifiedAccessGroupPolicyInput, ...request.Option) (*ec2.GetVerifiedAccessGroupPolicyOutput, error)
+ GetVerifiedAccessGroupPolicyRequest(*ec2.GetVerifiedAccessGroupPolicyInput) (*request.Request, *ec2.GetVerifiedAccessGroupPolicyOutput)
+
+ GetVpnConnectionDeviceSampleConfiguration(*ec2.GetVpnConnectionDeviceSampleConfigurationInput) (*ec2.GetVpnConnectionDeviceSampleConfigurationOutput, error)
+ GetVpnConnectionDeviceSampleConfigurationWithContext(aws.Context, *ec2.GetVpnConnectionDeviceSampleConfigurationInput, ...request.Option) (*ec2.GetVpnConnectionDeviceSampleConfigurationOutput, error)
+ GetVpnConnectionDeviceSampleConfigurationRequest(*ec2.GetVpnConnectionDeviceSampleConfigurationInput) (*request.Request, *ec2.GetVpnConnectionDeviceSampleConfigurationOutput)
+
+ GetVpnConnectionDeviceTypes(*ec2.GetVpnConnectionDeviceTypesInput) (*ec2.GetVpnConnectionDeviceTypesOutput, error)
+ GetVpnConnectionDeviceTypesWithContext(aws.Context, *ec2.GetVpnConnectionDeviceTypesInput, ...request.Option) (*ec2.GetVpnConnectionDeviceTypesOutput, error)
+ GetVpnConnectionDeviceTypesRequest(*ec2.GetVpnConnectionDeviceTypesInput) (*request.Request, *ec2.GetVpnConnectionDeviceTypesOutput)
+
+ GetVpnConnectionDeviceTypesPages(*ec2.GetVpnConnectionDeviceTypesInput, func(*ec2.GetVpnConnectionDeviceTypesOutput, bool) bool) error
+ GetVpnConnectionDeviceTypesPagesWithContext(aws.Context, *ec2.GetVpnConnectionDeviceTypesInput, func(*ec2.GetVpnConnectionDeviceTypesOutput, bool) bool, ...request.Option) error
+
+ GetVpnTunnelReplacementStatus(*ec2.GetVpnTunnelReplacementStatusInput) (*ec2.GetVpnTunnelReplacementStatusOutput, error)
+ GetVpnTunnelReplacementStatusWithContext(aws.Context, *ec2.GetVpnTunnelReplacementStatusInput, ...request.Option) (*ec2.GetVpnTunnelReplacementStatusOutput, error)
+ GetVpnTunnelReplacementStatusRequest(*ec2.GetVpnTunnelReplacementStatusInput) (*request.Request, *ec2.GetVpnTunnelReplacementStatusOutput)
+
+ ImportClientVpnClientCertificateRevocationList(*ec2.ImportClientVpnClientCertificateRevocationListInput) (*ec2.ImportClientVpnClientCertificateRevocationListOutput, error)
+ ImportClientVpnClientCertificateRevocationListWithContext(aws.Context, *ec2.ImportClientVpnClientCertificateRevocationListInput, ...request.Option) (*ec2.ImportClientVpnClientCertificateRevocationListOutput, error)
+ ImportClientVpnClientCertificateRevocationListRequest(*ec2.ImportClientVpnClientCertificateRevocationListInput) (*request.Request, *ec2.ImportClientVpnClientCertificateRevocationListOutput)
+
+ ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error)
+ ImportImageWithContext(aws.Context, *ec2.ImportImageInput, ...request.Option) (*ec2.ImportImageOutput, error)
+ ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput)
+
+ ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error)
+ ImportInstanceWithContext(aws.Context, *ec2.ImportInstanceInput, ...request.Option) (*ec2.ImportInstanceOutput, error)
+ ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput)
+
+ ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
+ ImportKeyPairWithContext(aws.Context, *ec2.ImportKeyPairInput, ...request.Option) (*ec2.ImportKeyPairOutput, error)
+ ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput)
+
+ ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error)
+ ImportSnapshotWithContext(aws.Context, *ec2.ImportSnapshotInput, ...request.Option) (*ec2.ImportSnapshotOutput, error)
+ ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput)
+
+ ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error)
+ ImportVolumeWithContext(aws.Context, *ec2.ImportVolumeInput, ...request.Option) (*ec2.ImportVolumeOutput, error)
+ ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput)
+
+ ListImagesInRecycleBin(*ec2.ListImagesInRecycleBinInput) (*ec2.ListImagesInRecycleBinOutput, error)
+ ListImagesInRecycleBinWithContext(aws.Context, *ec2.ListImagesInRecycleBinInput, ...request.Option) (*ec2.ListImagesInRecycleBinOutput, error)
+ ListImagesInRecycleBinRequest(*ec2.ListImagesInRecycleBinInput) (*request.Request, *ec2.ListImagesInRecycleBinOutput)
+
+ ListImagesInRecycleBinPages(*ec2.ListImagesInRecycleBinInput, func(*ec2.ListImagesInRecycleBinOutput, bool) bool) error
+ ListImagesInRecycleBinPagesWithContext(aws.Context, *ec2.ListImagesInRecycleBinInput, func(*ec2.ListImagesInRecycleBinOutput, bool) bool, ...request.Option) error
+
+ ListSnapshotsInRecycleBin(*ec2.ListSnapshotsInRecycleBinInput) (*ec2.ListSnapshotsInRecycleBinOutput, error)
+ ListSnapshotsInRecycleBinWithContext(aws.Context, *ec2.ListSnapshotsInRecycleBinInput, ...request.Option) (*ec2.ListSnapshotsInRecycleBinOutput, error)
+ ListSnapshotsInRecycleBinRequest(*ec2.ListSnapshotsInRecycleBinInput) (*request.Request, *ec2.ListSnapshotsInRecycleBinOutput)
+
+ ListSnapshotsInRecycleBinPages(*ec2.ListSnapshotsInRecycleBinInput, func(*ec2.ListSnapshotsInRecycleBinOutput, bool) bool) error
+ ListSnapshotsInRecycleBinPagesWithContext(aws.Context, *ec2.ListSnapshotsInRecycleBinInput, func(*ec2.ListSnapshotsInRecycleBinOutput, bool) bool, ...request.Option) error
+
+ ModifyAddressAttribute(*ec2.ModifyAddressAttributeInput) (*ec2.ModifyAddressAttributeOutput, error)
+ ModifyAddressAttributeWithContext(aws.Context, *ec2.ModifyAddressAttributeInput, ...request.Option) (*ec2.ModifyAddressAttributeOutput, error)
+ ModifyAddressAttributeRequest(*ec2.ModifyAddressAttributeInput) (*request.Request, *ec2.ModifyAddressAttributeOutput)
+
+ ModifyAvailabilityZoneGroup(*ec2.ModifyAvailabilityZoneGroupInput) (*ec2.ModifyAvailabilityZoneGroupOutput, error)
+ ModifyAvailabilityZoneGroupWithContext(aws.Context, *ec2.ModifyAvailabilityZoneGroupInput, ...request.Option) (*ec2.ModifyAvailabilityZoneGroupOutput, error)
+ ModifyAvailabilityZoneGroupRequest(*ec2.ModifyAvailabilityZoneGroupInput) (*request.Request, *ec2.ModifyAvailabilityZoneGroupOutput)
+
+ ModifyCapacityReservation(*ec2.ModifyCapacityReservationInput) (*ec2.ModifyCapacityReservationOutput, error)
+ ModifyCapacityReservationWithContext(aws.Context, *ec2.ModifyCapacityReservationInput, ...request.Option) (*ec2.ModifyCapacityReservationOutput, error)
+ ModifyCapacityReservationRequest(*ec2.ModifyCapacityReservationInput) (*request.Request, *ec2.ModifyCapacityReservationOutput)
+
+ ModifyCapacityReservationFleet(*ec2.ModifyCapacityReservationFleetInput) (*ec2.ModifyCapacityReservationFleetOutput, error)
+ ModifyCapacityReservationFleetWithContext(aws.Context, *ec2.ModifyCapacityReservationFleetInput, ...request.Option) (*ec2.ModifyCapacityReservationFleetOutput, error)
+ ModifyCapacityReservationFleetRequest(*ec2.ModifyCapacityReservationFleetInput) (*request.Request, *ec2.ModifyCapacityReservationFleetOutput)
+
+ ModifyClientVpnEndpoint(*ec2.ModifyClientVpnEndpointInput) (*ec2.ModifyClientVpnEndpointOutput, error)
+ ModifyClientVpnEndpointWithContext(aws.Context, *ec2.ModifyClientVpnEndpointInput, ...request.Option) (*ec2.ModifyClientVpnEndpointOutput, error)
+ ModifyClientVpnEndpointRequest(*ec2.ModifyClientVpnEndpointInput) (*request.Request, *ec2.ModifyClientVpnEndpointOutput)
+
+ ModifyDefaultCreditSpecification(*ec2.ModifyDefaultCreditSpecificationInput) (*ec2.ModifyDefaultCreditSpecificationOutput, error)
+ ModifyDefaultCreditSpecificationWithContext(aws.Context, *ec2.ModifyDefaultCreditSpecificationInput, ...request.Option) (*ec2.ModifyDefaultCreditSpecificationOutput, error)
+ ModifyDefaultCreditSpecificationRequest(*ec2.ModifyDefaultCreditSpecificationInput) (*request.Request, *ec2.ModifyDefaultCreditSpecificationOutput)
+
+ ModifyEbsDefaultKmsKeyId(*ec2.ModifyEbsDefaultKmsKeyIdInput) (*ec2.ModifyEbsDefaultKmsKeyIdOutput, error)
+ ModifyEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.ModifyEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.ModifyEbsDefaultKmsKeyIdOutput, error)
+ ModifyEbsDefaultKmsKeyIdRequest(*ec2.ModifyEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.ModifyEbsDefaultKmsKeyIdOutput)
+
+ ModifyFleet(*ec2.ModifyFleetInput) (*ec2.ModifyFleetOutput, error)
+ ModifyFleetWithContext(aws.Context, *ec2.ModifyFleetInput, ...request.Option) (*ec2.ModifyFleetOutput, error)
+ ModifyFleetRequest(*ec2.ModifyFleetInput) (*request.Request, *ec2.ModifyFleetOutput)
+
+ ModifyFpgaImageAttribute(*ec2.ModifyFpgaImageAttributeInput) (*ec2.ModifyFpgaImageAttributeOutput, error)
+ ModifyFpgaImageAttributeWithContext(aws.Context, *ec2.ModifyFpgaImageAttributeInput, ...request.Option) (*ec2.ModifyFpgaImageAttributeOutput, error)
+ ModifyFpgaImageAttributeRequest(*ec2.ModifyFpgaImageAttributeInput) (*request.Request, *ec2.ModifyFpgaImageAttributeOutput)
+
+ ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error)
+ ModifyHostsWithContext(aws.Context, *ec2.ModifyHostsInput, ...request.Option) (*ec2.ModifyHostsOutput, error)
+ ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput)
+
+ ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error)
+ ModifyIdFormatWithContext(aws.Context, *ec2.ModifyIdFormatInput, ...request.Option) (*ec2.ModifyIdFormatOutput, error)
+ ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput)
+
+ ModifyIdentityIdFormat(*ec2.ModifyIdentityIdFormatInput) (*ec2.ModifyIdentityIdFormatOutput, error)
+ ModifyIdentityIdFormatWithContext(aws.Context, *ec2.ModifyIdentityIdFormatInput, ...request.Option) (*ec2.ModifyIdentityIdFormatOutput, error)
+ ModifyIdentityIdFormatRequest(*ec2.ModifyIdentityIdFormatInput) (*request.Request, *ec2.ModifyIdentityIdFormatOutput)
+
+ ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error)
+ ModifyImageAttributeWithContext(aws.Context, *ec2.ModifyImageAttributeInput, ...request.Option) (*ec2.ModifyImageAttributeOutput, error)
+ ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput)
+
+ ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
+ ModifyInstanceAttributeWithContext(aws.Context, *ec2.ModifyInstanceAttributeInput, ...request.Option) (*ec2.ModifyInstanceAttributeOutput, error)
+ ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput)
+
+ ModifyInstanceCapacityReservationAttributes(*ec2.ModifyInstanceCapacityReservationAttributesInput) (*ec2.ModifyInstanceCapacityReservationAttributesOutput, error)
+ ModifyInstanceCapacityReservationAttributesWithContext(aws.Context, *ec2.ModifyInstanceCapacityReservationAttributesInput, ...request.Option) (*ec2.ModifyInstanceCapacityReservationAttributesOutput, error)
+ ModifyInstanceCapacityReservationAttributesRequest(*ec2.ModifyInstanceCapacityReservationAttributesInput) (*request.Request, *ec2.ModifyInstanceCapacityReservationAttributesOutput)
+
+ ModifyInstanceCreditSpecification(*ec2.ModifyInstanceCreditSpecificationInput) (*ec2.ModifyInstanceCreditSpecificationOutput, error)
+ ModifyInstanceCreditSpecificationWithContext(aws.Context, *ec2.ModifyInstanceCreditSpecificationInput, ...request.Option) (*ec2.ModifyInstanceCreditSpecificationOutput, error)
+ ModifyInstanceCreditSpecificationRequest(*ec2.ModifyInstanceCreditSpecificationInput) (*request.Request, *ec2.ModifyInstanceCreditSpecificationOutput)
+
+ ModifyInstanceEventStartTime(*ec2.ModifyInstanceEventStartTimeInput) (*ec2.ModifyInstanceEventStartTimeOutput, error)
+ ModifyInstanceEventStartTimeWithContext(aws.Context, *ec2.ModifyInstanceEventStartTimeInput, ...request.Option) (*ec2.ModifyInstanceEventStartTimeOutput, error)
+ ModifyInstanceEventStartTimeRequest(*ec2.ModifyInstanceEventStartTimeInput) (*request.Request, *ec2.ModifyInstanceEventStartTimeOutput)
+
+ ModifyInstanceEventWindow(*ec2.ModifyInstanceEventWindowInput) (*ec2.ModifyInstanceEventWindowOutput, error)
+ ModifyInstanceEventWindowWithContext(aws.Context, *ec2.ModifyInstanceEventWindowInput, ...request.Option) (*ec2.ModifyInstanceEventWindowOutput, error)
+ ModifyInstanceEventWindowRequest(*ec2.ModifyInstanceEventWindowInput) (*request.Request, *ec2.ModifyInstanceEventWindowOutput)
+
+ ModifyInstanceMaintenanceOptions(*ec2.ModifyInstanceMaintenanceOptionsInput) (*ec2.ModifyInstanceMaintenanceOptionsOutput, error)
+ ModifyInstanceMaintenanceOptionsWithContext(aws.Context, *ec2.ModifyInstanceMaintenanceOptionsInput, ...request.Option) (*ec2.ModifyInstanceMaintenanceOptionsOutput, error)
+ ModifyInstanceMaintenanceOptionsRequest(*ec2.ModifyInstanceMaintenanceOptionsInput) (*request.Request, *ec2.ModifyInstanceMaintenanceOptionsOutput)
+
+ ModifyInstanceMetadataOptions(*ec2.ModifyInstanceMetadataOptionsInput) (*ec2.ModifyInstanceMetadataOptionsOutput, error)
+ ModifyInstanceMetadataOptionsWithContext(aws.Context, *ec2.ModifyInstanceMetadataOptionsInput, ...request.Option) (*ec2.ModifyInstanceMetadataOptionsOutput, error)
+ ModifyInstanceMetadataOptionsRequest(*ec2.ModifyInstanceMetadataOptionsInput) (*request.Request, *ec2.ModifyInstanceMetadataOptionsOutput)
+
+ ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error)
+ ModifyInstancePlacementWithContext(aws.Context, *ec2.ModifyInstancePlacementInput, ...request.Option) (*ec2.ModifyInstancePlacementOutput, error)
+ ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput)
+
+ ModifyIpam(*ec2.ModifyIpamInput) (*ec2.ModifyIpamOutput, error)
+ ModifyIpamWithContext(aws.Context, *ec2.ModifyIpamInput, ...request.Option) (*ec2.ModifyIpamOutput, error)
+ ModifyIpamRequest(*ec2.ModifyIpamInput) (*request.Request, *ec2.ModifyIpamOutput)
+
+ ModifyIpamPool(*ec2.ModifyIpamPoolInput) (*ec2.ModifyIpamPoolOutput, error)
+ ModifyIpamPoolWithContext(aws.Context, *ec2.ModifyIpamPoolInput, ...request.Option) (*ec2.ModifyIpamPoolOutput, error)
+ ModifyIpamPoolRequest(*ec2.ModifyIpamPoolInput) (*request.Request, *ec2.ModifyIpamPoolOutput)
+
+ ModifyIpamResourceCidr(*ec2.ModifyIpamResourceCidrInput) (*ec2.ModifyIpamResourceCidrOutput, error)
+ ModifyIpamResourceCidrWithContext(aws.Context, *ec2.ModifyIpamResourceCidrInput, ...request.Option) (*ec2.ModifyIpamResourceCidrOutput, error)
+ ModifyIpamResourceCidrRequest(*ec2.ModifyIpamResourceCidrInput) (*request.Request, *ec2.ModifyIpamResourceCidrOutput)
+
+ ModifyIpamResourceDiscovery(*ec2.ModifyIpamResourceDiscoveryInput) (*ec2.ModifyIpamResourceDiscoveryOutput, error)
+ ModifyIpamResourceDiscoveryWithContext(aws.Context, *ec2.ModifyIpamResourceDiscoveryInput, ...request.Option) (*ec2.ModifyIpamResourceDiscoveryOutput, error)
+ ModifyIpamResourceDiscoveryRequest(*ec2.ModifyIpamResourceDiscoveryInput) (*request.Request, *ec2.ModifyIpamResourceDiscoveryOutput)
+
+ ModifyIpamScope(*ec2.ModifyIpamScopeInput) (*ec2.ModifyIpamScopeOutput, error)
+ ModifyIpamScopeWithContext(aws.Context, *ec2.ModifyIpamScopeInput, ...request.Option) (*ec2.ModifyIpamScopeOutput, error)
+ ModifyIpamScopeRequest(*ec2.ModifyIpamScopeInput) (*request.Request, *ec2.ModifyIpamScopeOutput)
+
+ ModifyLaunchTemplate(*ec2.ModifyLaunchTemplateInput) (*ec2.ModifyLaunchTemplateOutput, error)
+ ModifyLaunchTemplateWithContext(aws.Context, *ec2.ModifyLaunchTemplateInput, ...request.Option) (*ec2.ModifyLaunchTemplateOutput, error)
+ ModifyLaunchTemplateRequest(*ec2.ModifyLaunchTemplateInput) (*request.Request, *ec2.ModifyLaunchTemplateOutput)
+
+ ModifyLocalGatewayRoute(*ec2.ModifyLocalGatewayRouteInput) (*ec2.ModifyLocalGatewayRouteOutput, error)
+ ModifyLocalGatewayRouteWithContext(aws.Context, *ec2.ModifyLocalGatewayRouteInput, ...request.Option) (*ec2.ModifyLocalGatewayRouteOutput, error)
+ ModifyLocalGatewayRouteRequest(*ec2.ModifyLocalGatewayRouteInput) (*request.Request, *ec2.ModifyLocalGatewayRouteOutput)
+
+ ModifyManagedPrefixList(*ec2.ModifyManagedPrefixListInput) (*ec2.ModifyManagedPrefixListOutput, error)
+ ModifyManagedPrefixListWithContext(aws.Context, *ec2.ModifyManagedPrefixListInput, ...request.Option) (*ec2.ModifyManagedPrefixListOutput, error)
+ ModifyManagedPrefixListRequest(*ec2.ModifyManagedPrefixListInput) (*request.Request, *ec2.ModifyManagedPrefixListOutput)
+
+ ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error)
+ ModifyNetworkInterfaceAttributeWithContext(aws.Context, *ec2.ModifyNetworkInterfaceAttributeInput, ...request.Option) (*ec2.ModifyNetworkInterfaceAttributeOutput, error)
+ ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput)
+
+ ModifyPrivateDnsNameOptions(*ec2.ModifyPrivateDnsNameOptionsInput) (*ec2.ModifyPrivateDnsNameOptionsOutput, error)
+ ModifyPrivateDnsNameOptionsWithContext(aws.Context, *ec2.ModifyPrivateDnsNameOptionsInput, ...request.Option) (*ec2.ModifyPrivateDnsNameOptionsOutput, error)
+ ModifyPrivateDnsNameOptionsRequest(*ec2.ModifyPrivateDnsNameOptionsInput) (*request.Request, *ec2.ModifyPrivateDnsNameOptionsOutput)
+
+ ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error)
+ ModifyReservedInstancesWithContext(aws.Context, *ec2.ModifyReservedInstancesInput, ...request.Option) (*ec2.ModifyReservedInstancesOutput, error)
+ ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput)
+
+ ModifySecurityGroupRules(*ec2.ModifySecurityGroupRulesInput) (*ec2.ModifySecurityGroupRulesOutput, error)
+ ModifySecurityGroupRulesWithContext(aws.Context, *ec2.ModifySecurityGroupRulesInput, ...request.Option) (*ec2.ModifySecurityGroupRulesOutput, error)
+ ModifySecurityGroupRulesRequest(*ec2.ModifySecurityGroupRulesInput) (*request.Request, *ec2.ModifySecurityGroupRulesOutput)
+
+ ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error)
+ ModifySnapshotAttributeWithContext(aws.Context, *ec2.ModifySnapshotAttributeInput, ...request.Option) (*ec2.ModifySnapshotAttributeOutput, error)
+ ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput)
+
+ ModifySnapshotTier(*ec2.ModifySnapshotTierInput) (*ec2.ModifySnapshotTierOutput, error)
+ ModifySnapshotTierWithContext(aws.Context, *ec2.ModifySnapshotTierInput, ...request.Option) (*ec2.ModifySnapshotTierOutput, error)
+ ModifySnapshotTierRequest(*ec2.ModifySnapshotTierInput) (*request.Request, *ec2.ModifySnapshotTierOutput)
+
+ ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error)
+ ModifySpotFleetRequestWithContext(aws.Context, *ec2.ModifySpotFleetRequestInput, ...request.Option) (*ec2.ModifySpotFleetRequestOutput, error)
+ ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput)
+
+ ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error)
+ ModifySubnetAttributeWithContext(aws.Context, *ec2.ModifySubnetAttributeInput, ...request.Option) (*ec2.ModifySubnetAttributeOutput, error)
+ ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput)
+
+ ModifyTrafficMirrorFilterNetworkServices(*ec2.ModifyTrafficMirrorFilterNetworkServicesInput) (*ec2.ModifyTrafficMirrorFilterNetworkServicesOutput, error)
+ ModifyTrafficMirrorFilterNetworkServicesWithContext(aws.Context, *ec2.ModifyTrafficMirrorFilterNetworkServicesInput, ...request.Option) (*ec2.ModifyTrafficMirrorFilterNetworkServicesOutput, error)
+ ModifyTrafficMirrorFilterNetworkServicesRequest(*ec2.ModifyTrafficMirrorFilterNetworkServicesInput) (*request.Request, *ec2.ModifyTrafficMirrorFilterNetworkServicesOutput)
+
+ ModifyTrafficMirrorFilterRule(*ec2.ModifyTrafficMirrorFilterRuleInput) (*ec2.ModifyTrafficMirrorFilterRuleOutput, error)
+ ModifyTrafficMirrorFilterRuleWithContext(aws.Context, *ec2.ModifyTrafficMirrorFilterRuleInput, ...request.Option) (*ec2.ModifyTrafficMirrorFilterRuleOutput, error)
+ ModifyTrafficMirrorFilterRuleRequest(*ec2.ModifyTrafficMirrorFilterRuleInput) (*request.Request, *ec2.ModifyTrafficMirrorFilterRuleOutput)
+
+ ModifyTrafficMirrorSession(*ec2.ModifyTrafficMirrorSessionInput) (*ec2.ModifyTrafficMirrorSessionOutput, error)
+ ModifyTrafficMirrorSessionWithContext(aws.Context, *ec2.ModifyTrafficMirrorSessionInput, ...request.Option) (*ec2.ModifyTrafficMirrorSessionOutput, error)
+ ModifyTrafficMirrorSessionRequest(*ec2.ModifyTrafficMirrorSessionInput) (*request.Request, *ec2.ModifyTrafficMirrorSessionOutput)
+
+ ModifyTransitGateway(*ec2.ModifyTransitGatewayInput) (*ec2.ModifyTransitGatewayOutput, error)
+ ModifyTransitGatewayWithContext(aws.Context, *ec2.ModifyTransitGatewayInput, ...request.Option) (*ec2.ModifyTransitGatewayOutput, error)
+ ModifyTransitGatewayRequest(*ec2.ModifyTransitGatewayInput) (*request.Request, *ec2.ModifyTransitGatewayOutput)
+
+ ModifyTransitGatewayPrefixListReference(*ec2.ModifyTransitGatewayPrefixListReferenceInput) (*ec2.ModifyTransitGatewayPrefixListReferenceOutput, error)
+ ModifyTransitGatewayPrefixListReferenceWithContext(aws.Context, *ec2.ModifyTransitGatewayPrefixListReferenceInput, ...request.Option) (*ec2.ModifyTransitGatewayPrefixListReferenceOutput, error)
+ ModifyTransitGatewayPrefixListReferenceRequest(*ec2.ModifyTransitGatewayPrefixListReferenceInput) (*request.Request, *ec2.ModifyTransitGatewayPrefixListReferenceOutput)
+
+ ModifyTransitGatewayVpcAttachment(*ec2.ModifyTransitGatewayVpcAttachmentInput) (*ec2.ModifyTransitGatewayVpcAttachmentOutput, error)
+ ModifyTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.ModifyTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.ModifyTransitGatewayVpcAttachmentOutput, error)
+ ModifyTransitGatewayVpcAttachmentRequest(*ec2.ModifyTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.ModifyTransitGatewayVpcAttachmentOutput)
+
+ ModifyVerifiedAccessEndpoint(*ec2.ModifyVerifiedAccessEndpointInput) (*ec2.ModifyVerifiedAccessEndpointOutput, error)
+ ModifyVerifiedAccessEndpointWithContext(aws.Context, *ec2.ModifyVerifiedAccessEndpointInput, ...request.Option) (*ec2.ModifyVerifiedAccessEndpointOutput, error)
+ ModifyVerifiedAccessEndpointRequest(*ec2.ModifyVerifiedAccessEndpointInput) (*request.Request, *ec2.ModifyVerifiedAccessEndpointOutput)
+
+ ModifyVerifiedAccessEndpointPolicy(*ec2.ModifyVerifiedAccessEndpointPolicyInput) (*ec2.ModifyVerifiedAccessEndpointPolicyOutput, error)
+ ModifyVerifiedAccessEndpointPolicyWithContext(aws.Context, *ec2.ModifyVerifiedAccessEndpointPolicyInput, ...request.Option) (*ec2.ModifyVerifiedAccessEndpointPolicyOutput, error)
+ ModifyVerifiedAccessEndpointPolicyRequest(*ec2.ModifyVerifiedAccessEndpointPolicyInput) (*request.Request, *ec2.ModifyVerifiedAccessEndpointPolicyOutput)
+
+ ModifyVerifiedAccessGroup(*ec2.ModifyVerifiedAccessGroupInput) (*ec2.ModifyVerifiedAccessGroupOutput, error)
+ ModifyVerifiedAccessGroupWithContext(aws.Context, *ec2.ModifyVerifiedAccessGroupInput, ...request.Option) (*ec2.ModifyVerifiedAccessGroupOutput, error)
+ ModifyVerifiedAccessGroupRequest(*ec2.ModifyVerifiedAccessGroupInput) (*request.Request, *ec2.ModifyVerifiedAccessGroupOutput)
+
+ ModifyVerifiedAccessGroupPolicy(*ec2.ModifyVerifiedAccessGroupPolicyInput) (*ec2.ModifyVerifiedAccessGroupPolicyOutput, error)
+ ModifyVerifiedAccessGroupPolicyWithContext(aws.Context, *ec2.ModifyVerifiedAccessGroupPolicyInput, ...request.Option) (*ec2.ModifyVerifiedAccessGroupPolicyOutput, error)
+ ModifyVerifiedAccessGroupPolicyRequest(*ec2.ModifyVerifiedAccessGroupPolicyInput) (*request.Request, *ec2.ModifyVerifiedAccessGroupPolicyOutput)
+
+ ModifyVerifiedAccessInstance(*ec2.ModifyVerifiedAccessInstanceInput) (*ec2.ModifyVerifiedAccessInstanceOutput, error)
+ ModifyVerifiedAccessInstanceWithContext(aws.Context, *ec2.ModifyVerifiedAccessInstanceInput, ...request.Option) (*ec2.ModifyVerifiedAccessInstanceOutput, error)
+ ModifyVerifiedAccessInstanceRequest(*ec2.ModifyVerifiedAccessInstanceInput) (*request.Request, *ec2.ModifyVerifiedAccessInstanceOutput)
+
+ ModifyVerifiedAccessInstanceLoggingConfiguration(*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput) (*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error)
+ ModifyVerifiedAccessInstanceLoggingConfigurationWithContext(aws.Context, *ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput, ...request.Option) (*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput, error)
+ ModifyVerifiedAccessInstanceLoggingConfigurationRequest(*ec2.ModifyVerifiedAccessInstanceLoggingConfigurationInput) (*request.Request, *ec2.ModifyVerifiedAccessInstanceLoggingConfigurationOutput)
+
+ ModifyVerifiedAccessTrustProvider(*ec2.ModifyVerifiedAccessTrustProviderInput) (*ec2.ModifyVerifiedAccessTrustProviderOutput, error)
+ ModifyVerifiedAccessTrustProviderWithContext(aws.Context, *ec2.ModifyVerifiedAccessTrustProviderInput, ...request.Option) (*ec2.ModifyVerifiedAccessTrustProviderOutput, error)
+ ModifyVerifiedAccessTrustProviderRequest(*ec2.ModifyVerifiedAccessTrustProviderInput) (*request.Request, *ec2.ModifyVerifiedAccessTrustProviderOutput)
+
+ ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error)
+ ModifyVolumeWithContext(aws.Context, *ec2.ModifyVolumeInput, ...request.Option) (*ec2.ModifyVolumeOutput, error)
+ ModifyVolumeRequest(*ec2.ModifyVolumeInput) (*request.Request, *ec2.ModifyVolumeOutput)
+
+ ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error)
+ ModifyVolumeAttributeWithContext(aws.Context, *ec2.ModifyVolumeAttributeInput, ...request.Option) (*ec2.ModifyVolumeAttributeOutput, error)
+ ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput)
+
+ ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error)
+ ModifyVpcAttributeWithContext(aws.Context, *ec2.ModifyVpcAttributeInput, ...request.Option) (*ec2.ModifyVpcAttributeOutput, error)
+ ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput)
+
+ ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error)
+ ModifyVpcEndpointWithContext(aws.Context, *ec2.ModifyVpcEndpointInput, ...request.Option) (*ec2.ModifyVpcEndpointOutput, error)
+ ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput)
+
+ ModifyVpcEndpointConnectionNotification(*ec2.ModifyVpcEndpointConnectionNotificationInput) (*ec2.ModifyVpcEndpointConnectionNotificationOutput, error)
+ ModifyVpcEndpointConnectionNotificationWithContext(aws.Context, *ec2.ModifyVpcEndpointConnectionNotificationInput, ...request.Option) (*ec2.ModifyVpcEndpointConnectionNotificationOutput, error)
+ ModifyVpcEndpointConnectionNotificationRequest(*ec2.ModifyVpcEndpointConnectionNotificationInput) (*request.Request, *ec2.ModifyVpcEndpointConnectionNotificationOutput)
+
+ ModifyVpcEndpointServiceConfiguration(*ec2.ModifyVpcEndpointServiceConfigurationInput) (*ec2.ModifyVpcEndpointServiceConfigurationOutput, error)
+ ModifyVpcEndpointServiceConfigurationWithContext(aws.Context, *ec2.ModifyVpcEndpointServiceConfigurationInput, ...request.Option) (*ec2.ModifyVpcEndpointServiceConfigurationOutput, error)
+ ModifyVpcEndpointServiceConfigurationRequest(*ec2.ModifyVpcEndpointServiceConfigurationInput) (*request.Request, *ec2.ModifyVpcEndpointServiceConfigurationOutput)
+
+ ModifyVpcEndpointServicePayerResponsibility(*ec2.ModifyVpcEndpointServicePayerResponsibilityInput) (*ec2.ModifyVpcEndpointServicePayerResponsibilityOutput, error)
+ ModifyVpcEndpointServicePayerResponsibilityWithContext(aws.Context, *ec2.ModifyVpcEndpointServicePayerResponsibilityInput, ...request.Option) (*ec2.ModifyVpcEndpointServicePayerResponsibilityOutput, error)
+ ModifyVpcEndpointServicePayerResponsibilityRequest(*ec2.ModifyVpcEndpointServicePayerResponsibilityInput) (*request.Request, *ec2.ModifyVpcEndpointServicePayerResponsibilityOutput)
+
+ ModifyVpcEndpointServicePermissions(*ec2.ModifyVpcEndpointServicePermissionsInput) (*ec2.ModifyVpcEndpointServicePermissionsOutput, error)
+ ModifyVpcEndpointServicePermissionsWithContext(aws.Context, *ec2.ModifyVpcEndpointServicePermissionsInput, ...request.Option) (*ec2.ModifyVpcEndpointServicePermissionsOutput, error)
+ ModifyVpcEndpointServicePermissionsRequest(*ec2.ModifyVpcEndpointServicePermissionsInput) (*request.Request, *ec2.ModifyVpcEndpointServicePermissionsOutput)
+
+ ModifyVpcPeeringConnectionOptions(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*ec2.ModifyVpcPeeringConnectionOptionsOutput, error)
+ ModifyVpcPeeringConnectionOptionsWithContext(aws.Context, *ec2.ModifyVpcPeeringConnectionOptionsInput, ...request.Option) (*ec2.ModifyVpcPeeringConnectionOptionsOutput, error)
+ ModifyVpcPeeringConnectionOptionsRequest(*ec2.ModifyVpcPeeringConnectionOptionsInput) (*request.Request, *ec2.ModifyVpcPeeringConnectionOptionsOutput)
+
+ ModifyVpcTenancy(*ec2.ModifyVpcTenancyInput) (*ec2.ModifyVpcTenancyOutput, error)
+ ModifyVpcTenancyWithContext(aws.Context, *ec2.ModifyVpcTenancyInput, ...request.Option) (*ec2.ModifyVpcTenancyOutput, error)
+ ModifyVpcTenancyRequest(*ec2.ModifyVpcTenancyInput) (*request.Request, *ec2.ModifyVpcTenancyOutput)
+
+ ModifyVpnConnection(*ec2.ModifyVpnConnectionInput) (*ec2.ModifyVpnConnectionOutput, error)
+ ModifyVpnConnectionWithContext(aws.Context, *ec2.ModifyVpnConnectionInput, ...request.Option) (*ec2.ModifyVpnConnectionOutput, error)
+ ModifyVpnConnectionRequest(*ec2.ModifyVpnConnectionInput) (*request.Request, *ec2.ModifyVpnConnectionOutput)
+
+ ModifyVpnConnectionOptions(*ec2.ModifyVpnConnectionOptionsInput) (*ec2.ModifyVpnConnectionOptionsOutput, error)
+ ModifyVpnConnectionOptionsWithContext(aws.Context, *ec2.ModifyVpnConnectionOptionsInput, ...request.Option) (*ec2.ModifyVpnConnectionOptionsOutput, error)
+ ModifyVpnConnectionOptionsRequest(*ec2.ModifyVpnConnectionOptionsInput) (*request.Request, *ec2.ModifyVpnConnectionOptionsOutput)
+
+ ModifyVpnTunnelCertificate(*ec2.ModifyVpnTunnelCertificateInput) (*ec2.ModifyVpnTunnelCertificateOutput, error)
+ ModifyVpnTunnelCertificateWithContext(aws.Context, *ec2.ModifyVpnTunnelCertificateInput, ...request.Option) (*ec2.ModifyVpnTunnelCertificateOutput, error)
+ ModifyVpnTunnelCertificateRequest(*ec2.ModifyVpnTunnelCertificateInput) (*request.Request, *ec2.ModifyVpnTunnelCertificateOutput)
+
+ ModifyVpnTunnelOptions(*ec2.ModifyVpnTunnelOptionsInput) (*ec2.ModifyVpnTunnelOptionsOutput, error)
+ ModifyVpnTunnelOptionsWithContext(aws.Context, *ec2.ModifyVpnTunnelOptionsInput, ...request.Option) (*ec2.ModifyVpnTunnelOptionsOutput, error)
+ ModifyVpnTunnelOptionsRequest(*ec2.ModifyVpnTunnelOptionsInput) (*request.Request, *ec2.ModifyVpnTunnelOptionsOutput)
+
+ MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error)
+ MonitorInstancesWithContext(aws.Context, *ec2.MonitorInstancesInput, ...request.Option) (*ec2.MonitorInstancesOutput, error)
+ MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput)
+
+ MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error)
+ MoveAddressToVpcWithContext(aws.Context, *ec2.MoveAddressToVpcInput, ...request.Option) (*ec2.MoveAddressToVpcOutput, error)
+ MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput)
+
+ MoveByoipCidrToIpam(*ec2.MoveByoipCidrToIpamInput) (*ec2.MoveByoipCidrToIpamOutput, error)
+ MoveByoipCidrToIpamWithContext(aws.Context, *ec2.MoveByoipCidrToIpamInput, ...request.Option) (*ec2.MoveByoipCidrToIpamOutput, error)
+ MoveByoipCidrToIpamRequest(*ec2.MoveByoipCidrToIpamInput) (*request.Request, *ec2.MoveByoipCidrToIpamOutput)
+
+ ProvisionByoipCidr(*ec2.ProvisionByoipCidrInput) (*ec2.ProvisionByoipCidrOutput, error)
+ ProvisionByoipCidrWithContext(aws.Context, *ec2.ProvisionByoipCidrInput, ...request.Option) (*ec2.ProvisionByoipCidrOutput, error)
+ ProvisionByoipCidrRequest(*ec2.ProvisionByoipCidrInput) (*request.Request, *ec2.ProvisionByoipCidrOutput)
+
+ ProvisionIpamPoolCidr(*ec2.ProvisionIpamPoolCidrInput) (*ec2.ProvisionIpamPoolCidrOutput, error)
+ ProvisionIpamPoolCidrWithContext(aws.Context, *ec2.ProvisionIpamPoolCidrInput, ...request.Option) (*ec2.ProvisionIpamPoolCidrOutput, error)
+ ProvisionIpamPoolCidrRequest(*ec2.ProvisionIpamPoolCidrInput) (*request.Request, *ec2.ProvisionIpamPoolCidrOutput)
+
+ ProvisionPublicIpv4PoolCidr(*ec2.ProvisionPublicIpv4PoolCidrInput) (*ec2.ProvisionPublicIpv4PoolCidrOutput, error)
+ ProvisionPublicIpv4PoolCidrWithContext(aws.Context, *ec2.ProvisionPublicIpv4PoolCidrInput, ...request.Option) (*ec2.ProvisionPublicIpv4PoolCidrOutput, error)
+ ProvisionPublicIpv4PoolCidrRequest(*ec2.ProvisionPublicIpv4PoolCidrInput) (*request.Request, *ec2.ProvisionPublicIpv4PoolCidrOutput)
+
+ PurchaseHostReservation(*ec2.PurchaseHostReservationInput) (*ec2.PurchaseHostReservationOutput, error)
+ PurchaseHostReservationWithContext(aws.Context, *ec2.PurchaseHostReservationInput, ...request.Option) (*ec2.PurchaseHostReservationOutput, error)
+ PurchaseHostReservationRequest(*ec2.PurchaseHostReservationInput) (*request.Request, *ec2.PurchaseHostReservationOutput)
+
+ PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error)
+ PurchaseReservedInstancesOfferingWithContext(aws.Context, *ec2.PurchaseReservedInstancesOfferingInput, ...request.Option) (*ec2.PurchaseReservedInstancesOfferingOutput, error)
+ PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput)
+
+ PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error)
+ PurchaseScheduledInstancesWithContext(aws.Context, *ec2.PurchaseScheduledInstancesInput, ...request.Option) (*ec2.PurchaseScheduledInstancesOutput, error)
+ PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput)
+
+ RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error)
+ RebootInstancesWithContext(aws.Context, *ec2.RebootInstancesInput, ...request.Option) (*ec2.RebootInstancesOutput, error)
+ RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput)
+
+ RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error)
+ RegisterImageWithContext(aws.Context, *ec2.RegisterImageInput, ...request.Option) (*ec2.RegisterImageOutput, error)
+ RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput)
+
+ RegisterInstanceEventNotificationAttributes(*ec2.RegisterInstanceEventNotificationAttributesInput) (*ec2.RegisterInstanceEventNotificationAttributesOutput, error)
+ RegisterInstanceEventNotificationAttributesWithContext(aws.Context, *ec2.RegisterInstanceEventNotificationAttributesInput, ...request.Option) (*ec2.RegisterInstanceEventNotificationAttributesOutput, error)
+ RegisterInstanceEventNotificationAttributesRequest(*ec2.RegisterInstanceEventNotificationAttributesInput) (*request.Request, *ec2.RegisterInstanceEventNotificationAttributesOutput)
+
+ RegisterTransitGatewayMulticastGroupMembers(*ec2.RegisterTransitGatewayMulticastGroupMembersInput) (*ec2.RegisterTransitGatewayMulticastGroupMembersOutput, error)
+ RegisterTransitGatewayMulticastGroupMembersWithContext(aws.Context, *ec2.RegisterTransitGatewayMulticastGroupMembersInput, ...request.Option) (*ec2.RegisterTransitGatewayMulticastGroupMembersOutput, error)
+ RegisterTransitGatewayMulticastGroupMembersRequest(*ec2.RegisterTransitGatewayMulticastGroupMembersInput) (*request.Request, *ec2.RegisterTransitGatewayMulticastGroupMembersOutput)
+
+ RegisterTransitGatewayMulticastGroupSources(*ec2.RegisterTransitGatewayMulticastGroupSourcesInput) (*ec2.RegisterTransitGatewayMulticastGroupSourcesOutput, error)
+ RegisterTransitGatewayMulticastGroupSourcesWithContext(aws.Context, *ec2.RegisterTransitGatewayMulticastGroupSourcesInput, ...request.Option) (*ec2.RegisterTransitGatewayMulticastGroupSourcesOutput, error)
+ RegisterTransitGatewayMulticastGroupSourcesRequest(*ec2.RegisterTransitGatewayMulticastGroupSourcesInput) (*request.Request, *ec2.RegisterTransitGatewayMulticastGroupSourcesOutput)
+
+ RejectTransitGatewayMulticastDomainAssociations(*ec2.RejectTransitGatewayMulticastDomainAssociationsInput) (*ec2.RejectTransitGatewayMulticastDomainAssociationsOutput, error)
+ RejectTransitGatewayMulticastDomainAssociationsWithContext(aws.Context, *ec2.RejectTransitGatewayMulticastDomainAssociationsInput, ...request.Option) (*ec2.RejectTransitGatewayMulticastDomainAssociationsOutput, error)
+ RejectTransitGatewayMulticastDomainAssociationsRequest(*ec2.RejectTransitGatewayMulticastDomainAssociationsInput) (*request.Request, *ec2.RejectTransitGatewayMulticastDomainAssociationsOutput)
+
+ RejectTransitGatewayPeeringAttachment(*ec2.RejectTransitGatewayPeeringAttachmentInput) (*ec2.RejectTransitGatewayPeeringAttachmentOutput, error)
+ RejectTransitGatewayPeeringAttachmentWithContext(aws.Context, *ec2.RejectTransitGatewayPeeringAttachmentInput, ...request.Option) (*ec2.RejectTransitGatewayPeeringAttachmentOutput, error)
+ RejectTransitGatewayPeeringAttachmentRequest(*ec2.RejectTransitGatewayPeeringAttachmentInput) (*request.Request, *ec2.RejectTransitGatewayPeeringAttachmentOutput)
+
+ RejectTransitGatewayVpcAttachment(*ec2.RejectTransitGatewayVpcAttachmentInput) (*ec2.RejectTransitGatewayVpcAttachmentOutput, error)
+ RejectTransitGatewayVpcAttachmentWithContext(aws.Context, *ec2.RejectTransitGatewayVpcAttachmentInput, ...request.Option) (*ec2.RejectTransitGatewayVpcAttachmentOutput, error)
+ RejectTransitGatewayVpcAttachmentRequest(*ec2.RejectTransitGatewayVpcAttachmentInput) (*request.Request, *ec2.RejectTransitGatewayVpcAttachmentOutput)
+
+ RejectVpcEndpointConnections(*ec2.RejectVpcEndpointConnectionsInput) (*ec2.RejectVpcEndpointConnectionsOutput, error)
+ RejectVpcEndpointConnectionsWithContext(aws.Context, *ec2.RejectVpcEndpointConnectionsInput, ...request.Option) (*ec2.RejectVpcEndpointConnectionsOutput, error)
+ RejectVpcEndpointConnectionsRequest(*ec2.RejectVpcEndpointConnectionsInput) (*request.Request, *ec2.RejectVpcEndpointConnectionsOutput)
+
+ RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error)
+ RejectVpcPeeringConnectionWithContext(aws.Context, *ec2.RejectVpcPeeringConnectionInput, ...request.Option) (*ec2.RejectVpcPeeringConnectionOutput, error)
+ RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput)
+
+ ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error)
+ ReleaseAddressWithContext(aws.Context, *ec2.ReleaseAddressInput, ...request.Option) (*ec2.ReleaseAddressOutput, error)
+ ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput)
+
+ ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error)
+ ReleaseHostsWithContext(aws.Context, *ec2.ReleaseHostsInput, ...request.Option) (*ec2.ReleaseHostsOutput, error)
+ ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput)
+
+ ReleaseIpamPoolAllocation(*ec2.ReleaseIpamPoolAllocationInput) (*ec2.ReleaseIpamPoolAllocationOutput, error)
+ ReleaseIpamPoolAllocationWithContext(aws.Context, *ec2.ReleaseIpamPoolAllocationInput, ...request.Option) (*ec2.ReleaseIpamPoolAllocationOutput, error)
+ ReleaseIpamPoolAllocationRequest(*ec2.ReleaseIpamPoolAllocationInput) (*request.Request, *ec2.ReleaseIpamPoolAllocationOutput)
+
+ ReplaceIamInstanceProfileAssociation(*ec2.ReplaceIamInstanceProfileAssociationInput) (*ec2.ReplaceIamInstanceProfileAssociationOutput, error)
+ ReplaceIamInstanceProfileAssociationWithContext(aws.Context, *ec2.ReplaceIamInstanceProfileAssociationInput, ...request.Option) (*ec2.ReplaceIamInstanceProfileAssociationOutput, error)
+ ReplaceIamInstanceProfileAssociationRequest(*ec2.ReplaceIamInstanceProfileAssociationInput) (*request.Request, *ec2.ReplaceIamInstanceProfileAssociationOutput)
+
+ ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error)
+ ReplaceNetworkAclAssociationWithContext(aws.Context, *ec2.ReplaceNetworkAclAssociationInput, ...request.Option) (*ec2.ReplaceNetworkAclAssociationOutput, error)
+ ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput)
+
+ ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error)
+ ReplaceNetworkAclEntryWithContext(aws.Context, *ec2.ReplaceNetworkAclEntryInput, ...request.Option) (*ec2.ReplaceNetworkAclEntryOutput, error)
+ ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput)
+
+ ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error)
+ ReplaceRouteWithContext(aws.Context, *ec2.ReplaceRouteInput, ...request.Option) (*ec2.ReplaceRouteOutput, error)
+ ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput)
+
+ ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error)
+ ReplaceRouteTableAssociationWithContext(aws.Context, *ec2.ReplaceRouteTableAssociationInput, ...request.Option) (*ec2.ReplaceRouteTableAssociationOutput, error)
+ ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput)
+
+ ReplaceTransitGatewayRoute(*ec2.ReplaceTransitGatewayRouteInput) (*ec2.ReplaceTransitGatewayRouteOutput, error)
+ ReplaceTransitGatewayRouteWithContext(aws.Context, *ec2.ReplaceTransitGatewayRouteInput, ...request.Option) (*ec2.ReplaceTransitGatewayRouteOutput, error)
+ ReplaceTransitGatewayRouteRequest(*ec2.ReplaceTransitGatewayRouteInput) (*request.Request, *ec2.ReplaceTransitGatewayRouteOutput)
+
+ ReplaceVpnTunnel(*ec2.ReplaceVpnTunnelInput) (*ec2.ReplaceVpnTunnelOutput, error)
+ ReplaceVpnTunnelWithContext(aws.Context, *ec2.ReplaceVpnTunnelInput, ...request.Option) (*ec2.ReplaceVpnTunnelOutput, error)
+ ReplaceVpnTunnelRequest(*ec2.ReplaceVpnTunnelInput) (*request.Request, *ec2.ReplaceVpnTunnelOutput)
+
+ ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error)
+ ReportInstanceStatusWithContext(aws.Context, *ec2.ReportInstanceStatusInput, ...request.Option) (*ec2.ReportInstanceStatusOutput, error)
+ ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput)
+
+ RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error)
+ RequestSpotFleetWithContext(aws.Context, *ec2.RequestSpotFleetInput, ...request.Option) (*ec2.RequestSpotFleetOutput, error)
+ RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput)
+
+ RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error)
+ RequestSpotInstancesWithContext(aws.Context, *ec2.RequestSpotInstancesInput, ...request.Option) (*ec2.RequestSpotInstancesOutput, error)
+ RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput)
+
+ ResetAddressAttribute(*ec2.ResetAddressAttributeInput) (*ec2.ResetAddressAttributeOutput, error)
+ ResetAddressAttributeWithContext(aws.Context, *ec2.ResetAddressAttributeInput, ...request.Option) (*ec2.ResetAddressAttributeOutput, error)
+ ResetAddressAttributeRequest(*ec2.ResetAddressAttributeInput) (*request.Request, *ec2.ResetAddressAttributeOutput)
+
+ ResetEbsDefaultKmsKeyId(*ec2.ResetEbsDefaultKmsKeyIdInput) (*ec2.ResetEbsDefaultKmsKeyIdOutput, error)
+ ResetEbsDefaultKmsKeyIdWithContext(aws.Context, *ec2.ResetEbsDefaultKmsKeyIdInput, ...request.Option) (*ec2.ResetEbsDefaultKmsKeyIdOutput, error)
+ ResetEbsDefaultKmsKeyIdRequest(*ec2.ResetEbsDefaultKmsKeyIdInput) (*request.Request, *ec2.ResetEbsDefaultKmsKeyIdOutput)
+
+ ResetFpgaImageAttribute(*ec2.ResetFpgaImageAttributeInput) (*ec2.ResetFpgaImageAttributeOutput, error)
+ ResetFpgaImageAttributeWithContext(aws.Context, *ec2.ResetFpgaImageAttributeInput, ...request.Option) (*ec2.ResetFpgaImageAttributeOutput, error)
+ ResetFpgaImageAttributeRequest(*ec2.ResetFpgaImageAttributeInput) (*request.Request, *ec2.ResetFpgaImageAttributeOutput)
+
+ ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error)
+ ResetImageAttributeWithContext(aws.Context, *ec2.ResetImageAttributeInput, ...request.Option) (*ec2.ResetImageAttributeOutput, error)
+ ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput)
+
+ ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error)
+ ResetInstanceAttributeWithContext(aws.Context, *ec2.ResetInstanceAttributeInput, ...request.Option) (*ec2.ResetInstanceAttributeOutput, error)
+ ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput)
+
+ ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error)
+ ResetNetworkInterfaceAttributeWithContext(aws.Context, *ec2.ResetNetworkInterfaceAttributeInput, ...request.Option) (*ec2.ResetNetworkInterfaceAttributeOutput, error)
+ ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput)
+
+ ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error)
+ ResetSnapshotAttributeWithContext(aws.Context, *ec2.ResetSnapshotAttributeInput, ...request.Option) (*ec2.ResetSnapshotAttributeOutput, error)
+ ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput)
+
+ RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error)
+ RestoreAddressToClassicWithContext(aws.Context, *ec2.RestoreAddressToClassicInput, ...request.Option) (*ec2.RestoreAddressToClassicOutput, error)
+ RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput)
+
+ RestoreImageFromRecycleBin(*ec2.RestoreImageFromRecycleBinInput) (*ec2.RestoreImageFromRecycleBinOutput, error)
+ RestoreImageFromRecycleBinWithContext(aws.Context, *ec2.RestoreImageFromRecycleBinInput, ...request.Option) (*ec2.RestoreImageFromRecycleBinOutput, error)
+ RestoreImageFromRecycleBinRequest(*ec2.RestoreImageFromRecycleBinInput) (*request.Request, *ec2.RestoreImageFromRecycleBinOutput)
+
+ RestoreManagedPrefixListVersion(*ec2.RestoreManagedPrefixListVersionInput) (*ec2.RestoreManagedPrefixListVersionOutput, error)
+ RestoreManagedPrefixListVersionWithContext(aws.Context, *ec2.RestoreManagedPrefixListVersionInput, ...request.Option) (*ec2.RestoreManagedPrefixListVersionOutput, error)
+ RestoreManagedPrefixListVersionRequest(*ec2.RestoreManagedPrefixListVersionInput) (*request.Request, *ec2.RestoreManagedPrefixListVersionOutput)
+
+ RestoreSnapshotFromRecycleBin(*ec2.RestoreSnapshotFromRecycleBinInput) (*ec2.RestoreSnapshotFromRecycleBinOutput, error)
+ RestoreSnapshotFromRecycleBinWithContext(aws.Context, *ec2.RestoreSnapshotFromRecycleBinInput, ...request.Option) (*ec2.RestoreSnapshotFromRecycleBinOutput, error)
+ RestoreSnapshotFromRecycleBinRequest(*ec2.RestoreSnapshotFromRecycleBinInput) (*request.Request, *ec2.RestoreSnapshotFromRecycleBinOutput)
+
+ RestoreSnapshotTier(*ec2.RestoreSnapshotTierInput) (*ec2.RestoreSnapshotTierOutput, error)
+ RestoreSnapshotTierWithContext(aws.Context, *ec2.RestoreSnapshotTierInput, ...request.Option) (*ec2.RestoreSnapshotTierOutput, error)
+ RestoreSnapshotTierRequest(*ec2.RestoreSnapshotTierInput) (*request.Request, *ec2.RestoreSnapshotTierOutput)
+
+ RevokeClientVpnIngress(*ec2.RevokeClientVpnIngressInput) (*ec2.RevokeClientVpnIngressOutput, error)
+ RevokeClientVpnIngressWithContext(aws.Context, *ec2.RevokeClientVpnIngressInput, ...request.Option) (*ec2.RevokeClientVpnIngressOutput, error)
+ RevokeClientVpnIngressRequest(*ec2.RevokeClientVpnIngressInput) (*request.Request, *ec2.RevokeClientVpnIngressOutput)
+
+ RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error)
+ RevokeSecurityGroupEgressWithContext(aws.Context, *ec2.RevokeSecurityGroupEgressInput, ...request.Option) (*ec2.RevokeSecurityGroupEgressOutput, error)
+ RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput)
+
+ RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)
+ RevokeSecurityGroupIngressWithContext(aws.Context, *ec2.RevokeSecurityGroupIngressInput, ...request.Option) (*ec2.RevokeSecurityGroupIngressOutput, error)
+ RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput)
+
+ RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error)
+ RunInstancesWithContext(aws.Context, *ec2.RunInstancesInput, ...request.Option) (*ec2.Reservation, error)
+ RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation)
+
+ RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error)
+ RunScheduledInstancesWithContext(aws.Context, *ec2.RunScheduledInstancesInput, ...request.Option) (*ec2.RunScheduledInstancesOutput, error)
+ RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput)
+
+ SearchLocalGatewayRoutes(*ec2.SearchLocalGatewayRoutesInput) (*ec2.SearchLocalGatewayRoutesOutput, error)
+ SearchLocalGatewayRoutesWithContext(aws.Context, *ec2.SearchLocalGatewayRoutesInput, ...request.Option) (*ec2.SearchLocalGatewayRoutesOutput, error)
+ SearchLocalGatewayRoutesRequest(*ec2.SearchLocalGatewayRoutesInput) (*request.Request, *ec2.SearchLocalGatewayRoutesOutput)
+
+ SearchLocalGatewayRoutesPages(*ec2.SearchLocalGatewayRoutesInput, func(*ec2.SearchLocalGatewayRoutesOutput, bool) bool) error
+ SearchLocalGatewayRoutesPagesWithContext(aws.Context, *ec2.SearchLocalGatewayRoutesInput, func(*ec2.SearchLocalGatewayRoutesOutput, bool) bool, ...request.Option) error
+
+ SearchTransitGatewayMulticastGroups(*ec2.SearchTransitGatewayMulticastGroupsInput) (*ec2.SearchTransitGatewayMulticastGroupsOutput, error)
+ SearchTransitGatewayMulticastGroupsWithContext(aws.Context, *ec2.SearchTransitGatewayMulticastGroupsInput, ...request.Option) (*ec2.SearchTransitGatewayMulticastGroupsOutput, error)
+ SearchTransitGatewayMulticastGroupsRequest(*ec2.SearchTransitGatewayMulticastGroupsInput) (*request.Request, *ec2.SearchTransitGatewayMulticastGroupsOutput)
+
+ SearchTransitGatewayMulticastGroupsPages(*ec2.SearchTransitGatewayMulticastGroupsInput, func(*ec2.SearchTransitGatewayMulticastGroupsOutput, bool) bool) error
+ SearchTransitGatewayMulticastGroupsPagesWithContext(aws.Context, *ec2.SearchTransitGatewayMulticastGroupsInput, func(*ec2.SearchTransitGatewayMulticastGroupsOutput, bool) bool, ...request.Option) error
+
+ SearchTransitGatewayRoutes(*ec2.SearchTransitGatewayRoutesInput) (*ec2.SearchTransitGatewayRoutesOutput, error)
+ SearchTransitGatewayRoutesWithContext(aws.Context, *ec2.SearchTransitGatewayRoutesInput, ...request.Option) (*ec2.SearchTransitGatewayRoutesOutput, error)
+ SearchTransitGatewayRoutesRequest(*ec2.SearchTransitGatewayRoutesInput) (*request.Request, *ec2.SearchTransitGatewayRoutesOutput)
+
+ SendDiagnosticInterrupt(*ec2.SendDiagnosticInterruptInput) (*ec2.SendDiagnosticInterruptOutput, error)
+ SendDiagnosticInterruptWithContext(aws.Context, *ec2.SendDiagnosticInterruptInput, ...request.Option) (*ec2.SendDiagnosticInterruptOutput, error)
+ SendDiagnosticInterruptRequest(*ec2.SendDiagnosticInterruptInput) (*request.Request, *ec2.SendDiagnosticInterruptOutput)
+
+ StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error)
+ StartInstancesWithContext(aws.Context, *ec2.StartInstancesInput, ...request.Option) (*ec2.StartInstancesOutput, error)
+ StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput)
+
+ StartNetworkInsightsAccessScopeAnalysis(*ec2.StartNetworkInsightsAccessScopeAnalysisInput) (*ec2.StartNetworkInsightsAccessScopeAnalysisOutput, error)
+ StartNetworkInsightsAccessScopeAnalysisWithContext(aws.Context, *ec2.StartNetworkInsightsAccessScopeAnalysisInput, ...request.Option) (*ec2.StartNetworkInsightsAccessScopeAnalysisOutput, error)
+ StartNetworkInsightsAccessScopeAnalysisRequest(*ec2.StartNetworkInsightsAccessScopeAnalysisInput) (*request.Request, *ec2.StartNetworkInsightsAccessScopeAnalysisOutput)
+
+ StartNetworkInsightsAnalysis(*ec2.StartNetworkInsightsAnalysisInput) (*ec2.StartNetworkInsightsAnalysisOutput, error)
+ StartNetworkInsightsAnalysisWithContext(aws.Context, *ec2.StartNetworkInsightsAnalysisInput, ...request.Option) (*ec2.StartNetworkInsightsAnalysisOutput, error)
+ StartNetworkInsightsAnalysisRequest(*ec2.StartNetworkInsightsAnalysisInput) (*request.Request, *ec2.StartNetworkInsightsAnalysisOutput)
+
+ StartVpcEndpointServicePrivateDnsVerification(*ec2.StartVpcEndpointServicePrivateDnsVerificationInput) (*ec2.StartVpcEndpointServicePrivateDnsVerificationOutput, error)
+ StartVpcEndpointServicePrivateDnsVerificationWithContext(aws.Context, *ec2.StartVpcEndpointServicePrivateDnsVerificationInput, ...request.Option) (*ec2.StartVpcEndpointServicePrivateDnsVerificationOutput, error)
+ StartVpcEndpointServicePrivateDnsVerificationRequest(*ec2.StartVpcEndpointServicePrivateDnsVerificationInput) (*request.Request, *ec2.StartVpcEndpointServicePrivateDnsVerificationOutput)
+
+ StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error)
+ StopInstancesWithContext(aws.Context, *ec2.StopInstancesInput, ...request.Option) (*ec2.StopInstancesOutput, error)
+ StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput)
+
+ TerminateClientVpnConnections(*ec2.TerminateClientVpnConnectionsInput) (*ec2.TerminateClientVpnConnectionsOutput, error)
+ TerminateClientVpnConnectionsWithContext(aws.Context, *ec2.TerminateClientVpnConnectionsInput, ...request.Option) (*ec2.TerminateClientVpnConnectionsOutput, error)
+ TerminateClientVpnConnectionsRequest(*ec2.TerminateClientVpnConnectionsInput) (*request.Request, *ec2.TerminateClientVpnConnectionsOutput)
+
+ TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
+ TerminateInstancesWithContext(aws.Context, *ec2.TerminateInstancesInput, ...request.Option) (*ec2.TerminateInstancesOutput, error)
+ TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput)
+
+ UnassignIpv6Addresses(*ec2.UnassignIpv6AddressesInput) (*ec2.UnassignIpv6AddressesOutput, error)
+ UnassignIpv6AddressesWithContext(aws.Context, *ec2.UnassignIpv6AddressesInput, ...request.Option) (*ec2.UnassignIpv6AddressesOutput, error)
+ UnassignIpv6AddressesRequest(*ec2.UnassignIpv6AddressesInput) (*request.Request, *ec2.UnassignIpv6AddressesOutput)
+
+ UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error)
+ UnassignPrivateIpAddressesWithContext(aws.Context, *ec2.UnassignPrivateIpAddressesInput, ...request.Option) (*ec2.UnassignPrivateIpAddressesOutput, error)
+ UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput)
+
+ UnassignPrivateNatGatewayAddress(*ec2.UnassignPrivateNatGatewayAddressInput) (*ec2.UnassignPrivateNatGatewayAddressOutput, error)
+ UnassignPrivateNatGatewayAddressWithContext(aws.Context, *ec2.UnassignPrivateNatGatewayAddressInput, ...request.Option) (*ec2.UnassignPrivateNatGatewayAddressOutput, error)
+ UnassignPrivateNatGatewayAddressRequest(*ec2.UnassignPrivateNatGatewayAddressInput) (*request.Request, *ec2.UnassignPrivateNatGatewayAddressOutput)
+
+ UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error)
+ UnmonitorInstancesWithContext(aws.Context, *ec2.UnmonitorInstancesInput, ...request.Option) (*ec2.UnmonitorInstancesOutput, error)
+ UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput)
+
+ UpdateSecurityGroupRuleDescriptionsEgress(*ec2.UpdateSecurityGroupRuleDescriptionsEgressInput) (*ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput, error)
+ UpdateSecurityGroupRuleDescriptionsEgressWithContext(aws.Context, *ec2.UpdateSecurityGroupRuleDescriptionsEgressInput, ...request.Option) (*ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput, error)
+ UpdateSecurityGroupRuleDescriptionsEgressRequest(*ec2.UpdateSecurityGroupRuleDescriptionsEgressInput) (*request.Request, *ec2.UpdateSecurityGroupRuleDescriptionsEgressOutput)
+
+ UpdateSecurityGroupRuleDescriptionsIngress(*ec2.UpdateSecurityGroupRuleDescriptionsIngressInput) (*ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput, error)
+ UpdateSecurityGroupRuleDescriptionsIngressWithContext(aws.Context, *ec2.UpdateSecurityGroupRuleDescriptionsIngressInput, ...request.Option) (*ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput, error)
+ UpdateSecurityGroupRuleDescriptionsIngressRequest(*ec2.UpdateSecurityGroupRuleDescriptionsIngressInput) (*request.Request, *ec2.UpdateSecurityGroupRuleDescriptionsIngressOutput)
+
+ WithdrawByoipCidr(*ec2.WithdrawByoipCidrInput) (*ec2.WithdrawByoipCidrOutput, error)
+ WithdrawByoipCidrWithContext(aws.Context, *ec2.WithdrawByoipCidrInput, ...request.Option) (*ec2.WithdrawByoipCidrOutput, error)
+ WithdrawByoipCidrRequest(*ec2.WithdrawByoipCidrInput) (*request.Request, *ec2.WithdrawByoipCidrOutput)
+
+ WaitUntilBundleTaskComplete(*ec2.DescribeBundleTasksInput) error
+ WaitUntilBundleTaskCompleteWithContext(aws.Context, *ec2.DescribeBundleTasksInput, ...request.WaiterOption) error
+
+ WaitUntilConversionTaskCancelled(*ec2.DescribeConversionTasksInput) error
+ WaitUntilConversionTaskCancelledWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error
+
+ WaitUntilConversionTaskCompleted(*ec2.DescribeConversionTasksInput) error
+ WaitUntilConversionTaskCompletedWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error
+
+ WaitUntilConversionTaskDeleted(*ec2.DescribeConversionTasksInput) error
+ WaitUntilConversionTaskDeletedWithContext(aws.Context, *ec2.DescribeConversionTasksInput, ...request.WaiterOption) error
+
+ WaitUntilCustomerGatewayAvailable(*ec2.DescribeCustomerGatewaysInput) error
+ WaitUntilCustomerGatewayAvailableWithContext(aws.Context, *ec2.DescribeCustomerGatewaysInput, ...request.WaiterOption) error
+
+ WaitUntilExportTaskCancelled(*ec2.DescribeExportTasksInput) error
+ WaitUntilExportTaskCancelledWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.WaiterOption) error
+
+ WaitUntilExportTaskCompleted(*ec2.DescribeExportTasksInput) error
+ WaitUntilExportTaskCompletedWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.WaiterOption) error
+
+ WaitUntilImageAvailable(*ec2.DescribeImagesInput) error
+ WaitUntilImageAvailableWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.WaiterOption) error
+
+ WaitUntilImageExists(*ec2.DescribeImagesInput) error
+ WaitUntilImageExistsWithContext(aws.Context, *ec2.DescribeImagesInput, ...request.WaiterOption) error
+
+ WaitUntilInstanceExists(*ec2.DescribeInstancesInput) error
+ WaitUntilInstanceExistsWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error
+
+ WaitUntilInstanceRunning(*ec2.DescribeInstancesInput) error
+ WaitUntilInstanceRunningWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error
+
+ WaitUntilInstanceStatusOk(*ec2.DescribeInstanceStatusInput) error
+ WaitUntilInstanceStatusOkWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.WaiterOption) error
+
+ WaitUntilInstanceStopped(*ec2.DescribeInstancesInput) error
+ WaitUntilInstanceStoppedWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error
+
+ WaitUntilInstanceTerminated(*ec2.DescribeInstancesInput) error
+ WaitUntilInstanceTerminatedWithContext(aws.Context, *ec2.DescribeInstancesInput, ...request.WaiterOption) error
+
+ WaitUntilInternetGatewayExists(*ec2.DescribeInternetGatewaysInput) error
+ WaitUntilInternetGatewayExistsWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, ...request.WaiterOption) error
+
+ WaitUntilKeyPairExists(*ec2.DescribeKeyPairsInput) error
+ WaitUntilKeyPairExistsWithContext(aws.Context, *ec2.DescribeKeyPairsInput, ...request.WaiterOption) error
+
+ WaitUntilNatGatewayAvailable(*ec2.DescribeNatGatewaysInput) error
+ WaitUntilNatGatewayAvailableWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.WaiterOption) error
+
+ WaitUntilNatGatewayDeleted(*ec2.DescribeNatGatewaysInput) error
+ WaitUntilNatGatewayDeletedWithContext(aws.Context, *ec2.DescribeNatGatewaysInput, ...request.WaiterOption) error
+
+ WaitUntilNetworkInterfaceAvailable(*ec2.DescribeNetworkInterfacesInput) error
+ WaitUntilNetworkInterfaceAvailableWithContext(aws.Context, *ec2.DescribeNetworkInterfacesInput, ...request.WaiterOption) error
+
+ WaitUntilPasswordDataAvailable(*ec2.GetPasswordDataInput) error
+ WaitUntilPasswordDataAvailableWithContext(aws.Context, *ec2.GetPasswordDataInput, ...request.WaiterOption) error
+
+ WaitUntilSecurityGroupExists(*ec2.DescribeSecurityGroupsInput) error
+ WaitUntilSecurityGroupExistsWithContext(aws.Context, *ec2.DescribeSecurityGroupsInput, ...request.WaiterOption) error
+
+ WaitUntilSnapshotCompleted(*ec2.DescribeSnapshotsInput) error
+ WaitUntilSnapshotCompletedWithContext(aws.Context, *ec2.DescribeSnapshotsInput, ...request.WaiterOption) error
+
+ WaitUntilSnapshotImported(*ec2.DescribeImportSnapshotTasksInput) error
+ WaitUntilSnapshotImportedWithContext(aws.Context, *ec2.DescribeImportSnapshotTasksInput, ...request.WaiterOption) error
+
+ WaitUntilSpotInstanceRequestFulfilled(*ec2.DescribeSpotInstanceRequestsInput) error
+ WaitUntilSpotInstanceRequestFulfilledWithContext(aws.Context, *ec2.DescribeSpotInstanceRequestsInput, ...request.WaiterOption) error
+
+ WaitUntilSubnetAvailable(*ec2.DescribeSubnetsInput) error
+ WaitUntilSubnetAvailableWithContext(aws.Context, *ec2.DescribeSubnetsInput, ...request.WaiterOption) error
+
+ WaitUntilSystemStatusOk(*ec2.DescribeInstanceStatusInput) error
+ WaitUntilSystemStatusOkWithContext(aws.Context, *ec2.DescribeInstanceStatusInput, ...request.WaiterOption) error
+
+ WaitUntilVolumeAvailable(*ec2.DescribeVolumesInput) error
+ WaitUntilVolumeAvailableWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) error
+
+ WaitUntilVolumeDeleted(*ec2.DescribeVolumesInput) error
+ WaitUntilVolumeDeletedWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) error
+
+ WaitUntilVolumeInUse(*ec2.DescribeVolumesInput) error
+ WaitUntilVolumeInUseWithContext(aws.Context, *ec2.DescribeVolumesInput, ...request.WaiterOption) error
+
+ WaitUntilVpcAvailable(*ec2.DescribeVpcsInput) error
+ WaitUntilVpcAvailableWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.WaiterOption) error
+
+ WaitUntilVpcExists(*ec2.DescribeVpcsInput) error
+ WaitUntilVpcExistsWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.WaiterOption) error
+
+ WaitUntilVpcPeeringConnectionDeleted(*ec2.DescribeVpcPeeringConnectionsInput) error
+ WaitUntilVpcPeeringConnectionDeletedWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.WaiterOption) error
+
+ WaitUntilVpcPeeringConnectionExists(*ec2.DescribeVpcPeeringConnectionsInput) error
+ WaitUntilVpcPeeringConnectionExistsWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.WaiterOption) error
+
+ WaitUntilVpnConnectionAvailable(*ec2.DescribeVpnConnectionsInput) error
+ WaitUntilVpnConnectionAvailableWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.WaiterOption) error
+
+ WaitUntilVpnConnectionDeleted(*ec2.DescribeVpnConnectionsInput) error
+ WaitUntilVpnConnectionDeletedWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.WaiterOption) error
+}
+
+var _ EC2API = (*ec2.EC2)(nil)
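
An illustrative sketch of how the EC2API interface above is typically consumed: because *ec2.EC2 satisfies it (per the compile-time assertion), tests can swap in a double that embeds the interface and overrides only the calls they exercise. The ec2iface import path is an assumption inferred from the vendored layout in this diff and may differ.

// Sketch only, not part of the vendored patch. Methods that are not
// overridden fall through to the embedded nil interface value and would
// panic if called, which is usually the desired behavior in a focused test.
package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2/ec2iface"
)

type fakeEC2 struct {
	ec2iface.EC2API // embed the interface; override only what the test needs
}

// DescribeInstances returns a canned, empty result instead of calling AWS.
func (f *fakeEC2) DescribeInstances(in *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
	return &ec2.DescribeInstancesOutput{}, nil
}
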
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
index a10de4f3f136..a970f12fbf1b 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
@@ -1156,6 +1156,57 @@ func (c *EC2) WaitUntilSnapshotCompletedWithContext(ctx aws.Context, input *Desc
return w.WaitWithContext(ctx)
}
+// WaitUntilSnapshotImported uses the Amazon EC2 API operation
+// DescribeImportSnapshotTasks to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *EC2) WaitUntilSnapshotImported(input *DescribeImportSnapshotTasksInput) error {
+ return c.WaitUntilSnapshotImportedWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilSnapshotImportedWithContext is an extended version of WaitUntilSnapshotImported.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) WaitUntilSnapshotImportedWithContext(ctx aws.Context, input *DescribeImportSnapshotTasksInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilSnapshotImported",
+ MaxAttempts: 40,
+ Delay: request.ConstantWaiterDelay(15 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.PathAllWaiterMatch, Argument: "ImportSnapshotTasks[].SnapshotTaskDetail.Status",
+ Expected: "completed",
+ },
+ {
+ State: request.FailureWaiterState,
+ Matcher: request.PathAnyWaiterMatch, Argument: "ImportSnapshotTasks[].SnapshotTaskDetail.Status",
+ Expected: "error",
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *DescribeImportSnapshotTasksInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeImportSnapshotTasksRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
// WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation
// DescribeSpotInstanceRequests to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
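
A hedged usage sketch for the waiter added above: WaitUntilSnapshotImportedWithContext polls DescribeImportSnapshotTasks (by default every 15 seconds, up to 40 attempts) and returns once every task's SnapshotTaskDetail.Status is "completed", failing early if any task reports "error". The session setup and task ID below are placeholders, not values from this change.

// Sketch only: wait for an EBS snapshot import task to finish.
package example

import (
	"context"
	"log"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ec2"
)

func waitForSnapshotImport(ctx context.Context, taskID string) error {
	svc := ec2.New(session.Must(session.NewSession()))
	input := &ec2.DescribeImportSnapshotTasksInput{
		ImportTaskIds: []*string{aws.String(taskID)},
	}
	// Blocks until the import completes, the waiter gives up, or ctx is cancelled.
	if err := svc.WaitUntilSnapshotImportedWithContext(ctx, input); err != nil {
		return err
	}
	log.Printf("snapshot import %s completed", taskID)
	return nil
}
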
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
deleted file mode 100644
index d8813a8af4d8..000000000000
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
+++ /dev/null
@@ -1,14352 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ecr
-
-import (
- "fmt"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
-)
-
-const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability"
-
-// BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the
-// client's request for the BatchCheckLayerAvailability operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchCheckLayerAvailability for more information on using the BatchCheckLayerAvailability
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchCheckLayerAvailabilityRequest method.
-// req, resp := client.BatchCheckLayerAvailabilityRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability
-func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) {
- op := &request.Operation{
- Name: opBatchCheckLayerAvailability,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchCheckLayerAvailabilityInput{}
- }
-
- output = &BatchCheckLayerAvailabilityOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// BatchCheckLayerAvailability API operation for Amazon EC2 Container Registry.
-//
-// Checks the availability of one or more image layers in a repository.
-//
-// When an image is pushed to a repository, each image layer is checked to verify
-// if it has been uploaded before. If it has been uploaded, then the image layer
-// is skipped.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation BatchCheckLayerAvailability for usage and error information.
-//
-// Returned Error Types:
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability
-func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) {
- req, out := c.BatchCheckLayerAvailabilityRequest(input)
- return out, req.Send()
-}
-
-// BatchCheckLayerAvailabilityWithContext is the same as BatchCheckLayerAvailability with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchCheckLayerAvailability for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) BatchCheckLayerAvailabilityWithContext(ctx aws.Context, input *BatchCheckLayerAvailabilityInput, opts ...request.Option) (*BatchCheckLayerAvailabilityOutput, error) {
- req, out := c.BatchCheckLayerAvailabilityRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opBatchDeleteImage = "BatchDeleteImage"
-
-// BatchDeleteImageRequest generates a "aws/request.Request" representing the
-// client's request for the BatchDeleteImage operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchDeleteImage for more information on using the BatchDeleteImage
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchDeleteImageRequest method.
-// req, resp := client.BatchDeleteImageRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage
-func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) {
- op := &request.Operation{
- Name: opBatchDeleteImage,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchDeleteImageInput{}
- }
-
- output = &BatchDeleteImageOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// BatchDeleteImage API operation for Amazon EC2 Container Registry.
-//
-// Deletes a list of specified images within a repository. Images are specified
-// with either an imageTag or imageDigest.
-//
-// You can remove a tag from an image by specifying the image's tag in your
-// request. When you remove the last tag from an image, the image is deleted
-// from your repository.
-//
-// You can completely delete an image (and all of its tags) by specifying the
-// image's digest in your request.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation BatchDeleteImage for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage
-func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) {
- req, out := c.BatchDeleteImageRequest(input)
- return out, req.Send()
-}
-
-// BatchDeleteImageWithContext is the same as BatchDeleteImage with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchDeleteImage for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) BatchDeleteImageWithContext(ctx aws.Context, input *BatchDeleteImageInput, opts ...request.Option) (*BatchDeleteImageOutput, error) {
- req, out := c.BatchDeleteImageRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opBatchGetImage = "BatchGetImage"
-
-// BatchGetImageRequest generates a "aws/request.Request" representing the
-// client's request for the BatchGetImage operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchGetImage for more information on using the BatchGetImage
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchGetImageRequest method.
-// req, resp := client.BatchGetImageRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage
-func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) {
- op := &request.Operation{
- Name: opBatchGetImage,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchGetImageInput{}
- }
-
- output = &BatchGetImageOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// BatchGetImage API operation for Amazon EC2 Container Registry.
-//
-// Gets detailed information for an image. Images are specified with either
-// an imageTag or imageDigest.
-//
-// When an image is pulled, the BatchGetImage API is called once to retrieve
-// the image manifest.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation BatchGetImage for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage
-func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) {
- req, out := c.BatchGetImageRequest(input)
- return out, req.Send()
-}
-
-// BatchGetImageWithContext is the same as BatchGetImage with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchGetImage for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) BatchGetImageWithContext(ctx aws.Context, input *BatchGetImageInput, opts ...request.Option) (*BatchGetImageOutput, error) {
- req, out := c.BatchGetImageRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opBatchGetRepositoryScanningConfiguration = "BatchGetRepositoryScanningConfiguration"
-
-// BatchGetRepositoryScanningConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the BatchGetRepositoryScanningConfiguration operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchGetRepositoryScanningConfiguration for more information on using the BatchGetRepositoryScanningConfiguration
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchGetRepositoryScanningConfigurationRequest method.
-// req, resp := client.BatchGetRepositoryScanningConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetRepositoryScanningConfiguration
-func (c *ECR) BatchGetRepositoryScanningConfigurationRequest(input *BatchGetRepositoryScanningConfigurationInput) (req *request.Request, output *BatchGetRepositoryScanningConfigurationOutput) {
- op := &request.Operation{
- Name: opBatchGetRepositoryScanningConfiguration,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchGetRepositoryScanningConfigurationInput{}
- }
-
- output = &BatchGetRepositoryScanningConfigurationOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// BatchGetRepositoryScanningConfiguration API operation for Amazon EC2 Container Registry.
-//
-// Gets the scanning configuration for one or more repositories.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation BatchGetRepositoryScanningConfiguration for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetRepositoryScanningConfiguration
-func (c *ECR) BatchGetRepositoryScanningConfiguration(input *BatchGetRepositoryScanningConfigurationInput) (*BatchGetRepositoryScanningConfigurationOutput, error) {
- req, out := c.BatchGetRepositoryScanningConfigurationRequest(input)
- return out, req.Send()
-}
-
-// BatchGetRepositoryScanningConfigurationWithContext is the same as BatchGetRepositoryScanningConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchGetRepositoryScanningConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) BatchGetRepositoryScanningConfigurationWithContext(ctx aws.Context, input *BatchGetRepositoryScanningConfigurationInput, opts ...request.Option) (*BatchGetRepositoryScanningConfigurationOutput, error) {
- req, out := c.BatchGetRepositoryScanningConfigurationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
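The *WithContext variants above need a non-nil context for cancellation. As a rough, hypothetical sketch (the vendored import paths, the session setup, and the "example-repo" repository name are assumptions, not taken from this change), a caller might pass a deadline-bound context like this:

// Illustrative sketch only: calling a *WithContext method from this vendored
// package with a timeout. SetContext panics on a nil context, so a real one
// is always constructed first.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/ecr"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	svc := ecr.New(session.Must(session.NewSession()))
	out, err := svc.BatchGetRepositoryScanningConfigurationWithContext(ctx,
		&ecr.BatchGetRepositoryScanningConfigurationInput{
			RepositoryNames: aws.StringSlice([]string{"example-repo"}),
		})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(out)
}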
-const opCompleteLayerUpload = "CompleteLayerUpload"
-
-// CompleteLayerUploadRequest generates a "aws/request.Request" representing the
-// client's request for the CompleteLayerUpload operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CompleteLayerUpload for more information on using the CompleteLayerUpload
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the CompleteLayerUploadRequest method.
-// req, resp := client.CompleteLayerUploadRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload
-func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) {
- op := &request.Operation{
- Name: opCompleteLayerUpload,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CompleteLayerUploadInput{}
- }
-
- output = &CompleteLayerUploadOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// CompleteLayerUpload API operation for Amazon EC2 Container Registry.
-//
-// Informs Amazon ECR that the image layer upload has completed for a specified
-// registry, repository name, and upload ID. You can optionally provide a sha256
-// digest of the image layer for data validation purposes.
-//
-// When an image is pushed, the CompleteLayerUpload API is called once for each
-// new image layer to verify that the upload has completed.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation CompleteLayerUpload for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - UploadNotFoundException
-// The upload could not be found, or the specified upload ID is not valid for
-// this repository.
-//
-// - InvalidLayerException
-// The layer digest calculation performed by Amazon ECR upon receipt of the
-// image layer does not match the digest specified.
-//
-// - LayerPartTooSmallException
-// Layer parts must be at least 5 MiB in size.
-//
-// - LayerAlreadyExistsException
-// The image layer already exists in the associated repository.
-//
-// - EmptyUploadException
-// The specified layer upload does not contain any layer parts.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload
-func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) {
- req, out := c.CompleteLayerUploadRequest(input)
- return out, req.Send()
-}
-
-// CompleteLayerUploadWithContext is the same as CompleteLayerUpload with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CompleteLayerUpload for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) CompleteLayerUploadWithContext(ctx aws.Context, input *CompleteLayerUploadInput, opts ...request.Option) (*CompleteLayerUploadOutput, error) {
- req, out := c.CompleteLayerUploadRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreatePullThroughCacheRule = "CreatePullThroughCacheRule"
-
-// CreatePullThroughCacheRuleRequest generates a "aws/request.Request" representing the
-// client's request for the CreatePullThroughCacheRule operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreatePullThroughCacheRule for more information on using the CreatePullThroughCacheRule
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the CreatePullThroughCacheRuleRequest method.
-// req, resp := client.CreatePullThroughCacheRuleRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreatePullThroughCacheRule
-func (c *ECR) CreatePullThroughCacheRuleRequest(input *CreatePullThroughCacheRuleInput) (req *request.Request, output *CreatePullThroughCacheRuleOutput) {
- op := &request.Operation{
- Name: opCreatePullThroughCacheRule,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CreatePullThroughCacheRuleInput{}
- }
-
- output = &CreatePullThroughCacheRuleOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// CreatePullThroughCacheRule API operation for Amazon EC2 Container Registry.
-//
-// Creates a pull through cache rule. A pull through cache rule provides a way
-// to cache images from an external public registry in your Amazon ECR private
-// registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation CreatePullThroughCacheRule for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// - PullThroughCacheRuleAlreadyExistsException
-// A pull through cache rule with these settings already exists for the private
-// registry.
-//
-// - UnsupportedUpstreamRegistryException
-// The specified upstream registry isn't supported.
-//
-// - LimitExceededException
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreatePullThroughCacheRule
-func (c *ECR) CreatePullThroughCacheRule(input *CreatePullThroughCacheRuleInput) (*CreatePullThroughCacheRuleOutput, error) {
- req, out := c.CreatePullThroughCacheRuleRequest(input)
- return out, req.Send()
-}
-
-// CreatePullThroughCacheRuleWithContext is the same as CreatePullThroughCacheRule with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreatePullThroughCacheRule for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) CreatePullThroughCacheRuleWithContext(ctx aws.Context, input *CreatePullThroughCacheRuleInput, opts ...request.Option) (*CreatePullThroughCacheRuleOutput, error) {
- req, out := c.CreatePullThroughCacheRuleRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreateRepository = "CreateRepository"
-
-// CreateRepositoryRequest generates a "aws/request.Request" representing the
-// client's request for the CreateRepository operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateRepository for more information on using the CreateRepository
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the CreateRepositoryRequest method.
-// req, resp := client.CreateRepositoryRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository
-func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) {
- op := &request.Operation{
- Name: opCreateRepository,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CreateRepositoryInput{}
- }
-
- output = &CreateRepositoryOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// CreateRepository API operation for Amazon EC2 Container Registry.
-//
-// Creates a repository. For more information, see Amazon ECR repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation CreateRepository for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - InvalidTagParameterException
-// An invalid parameter has been specified. Tag keys can have a maximum length
-// of 128 characters, and tag values can have a maximum length of 256 characters.
-//
-// - TooManyTagsException
-// The list of tags on the repository is over the limit. The maximum number
-// of tags that can be applied to a repository is 50.
-//
-// - RepositoryAlreadyExistsException
-// The specified repository already exists in the specified registry.
-//
-// - LimitExceededException
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository
-func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) {
- req, out := c.CreateRepositoryRequest(input)
- return out, req.Send()
-}
-
-// CreateRepositoryWithContext is the same as CreateRepository with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateRepository for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateRepositoryInput, opts ...request.Option) (*CreateRepositoryOutput, error) {
- req, out := c.CreateRepositoryRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
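The request-returning methods above exist mainly so callers can adjust the request before it is sent, for example the custom headers mentioned in their doc comments. A hedged sketch follows (the header name and repository name are invented for illustration, and the client and imports mirror the earlier example):

// Illustrative sketch only: set an extra header on a CreateRepository request
// before sending it, then read the created repository's name from the output.
func createWithCustomHeader(svc *ecr.ECR) error {
	req, out := svc.CreateRepositoryRequest(&ecr.CreateRepositoryInput{
		RepositoryName: aws.String("example-repo"),
	})
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // hypothetical header
	if err := req.Send(); err != nil {
		return err
	}
	fmt.Printf("created %s\n", aws.StringValue(out.Repository.RepositoryName))
	return nil
}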
-const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy"
-
-// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteLifecyclePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DeleteLifecyclePolicyRequest method.
-// req, resp := client.DeleteLifecyclePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
-func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) {
- op := &request.Operation{
- Name: opDeleteLifecyclePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteLifecyclePolicyInput{}
- }
-
- output = &DeleteLifecyclePolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry.
-//
-// Deletes the lifecycle policy associated with the specified repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DeleteLifecyclePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - LifecyclePolicyNotFoundException
-// The lifecycle policy could not be found, and no policy is set for the repository.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
-func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) {
- req, out := c.DeleteLifecyclePolicyRequest(input)
- return out, req.Send()
-}
-
-// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteLifecyclePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) {
- req, out := c.DeleteLifecyclePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeletePullThroughCacheRule = "DeletePullThroughCacheRule"
-
-// DeletePullThroughCacheRuleRequest generates a "aws/request.Request" representing the
-// client's request for the DeletePullThroughCacheRule operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeletePullThroughCacheRule for more information on using the DeletePullThroughCacheRule
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DeletePullThroughCacheRuleRequest method.
-// req, resp := client.DeletePullThroughCacheRuleRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeletePullThroughCacheRule
-func (c *ECR) DeletePullThroughCacheRuleRequest(input *DeletePullThroughCacheRuleInput) (req *request.Request, output *DeletePullThroughCacheRuleOutput) {
- op := &request.Operation{
- Name: opDeletePullThroughCacheRule,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeletePullThroughCacheRuleInput{}
- }
-
- output = &DeletePullThroughCacheRuleOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeletePullThroughCacheRule API operation for Amazon EC2 Container Registry.
-//
-// Deletes a pull through cache rule.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DeletePullThroughCacheRule for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// - PullThroughCacheRuleNotFoundException
-// The pull through cache rule was not found. Specify a valid pull through cache
-// rule and try again.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeletePullThroughCacheRule
-func (c *ECR) DeletePullThroughCacheRule(input *DeletePullThroughCacheRuleInput) (*DeletePullThroughCacheRuleOutput, error) {
- req, out := c.DeletePullThroughCacheRuleRequest(input)
- return out, req.Send()
-}
-
-// DeletePullThroughCacheRuleWithContext is the same as DeletePullThroughCacheRule with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeletePullThroughCacheRule for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DeletePullThroughCacheRuleWithContext(ctx aws.Context, input *DeletePullThroughCacheRuleInput, opts ...request.Option) (*DeletePullThroughCacheRuleOutput, error) {
- req, out := c.DeletePullThroughCacheRuleRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteRegistryPolicy = "DeleteRegistryPolicy"
-
-// DeleteRegistryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteRegistryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteRegistryPolicy for more information on using the DeleteRegistryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DeleteRegistryPolicyRequest method.
-// req, resp := client.DeleteRegistryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRegistryPolicy
-func (c *ECR) DeleteRegistryPolicyRequest(input *DeleteRegistryPolicyInput) (req *request.Request, output *DeleteRegistryPolicyOutput) {
- op := &request.Operation{
- Name: opDeleteRegistryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteRegistryPolicyInput{}
- }
-
- output = &DeleteRegistryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeleteRegistryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Deletes the registry permissions policy.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DeleteRegistryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RegistryPolicyNotFoundException
-// The registry doesn't have an associated registry policy.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRegistryPolicy
-func (c *ECR) DeleteRegistryPolicy(input *DeleteRegistryPolicyInput) (*DeleteRegistryPolicyOutput, error) {
- req, out := c.DeleteRegistryPolicyRequest(input)
- return out, req.Send()
-}
-
-// DeleteRegistryPolicyWithContext is the same as DeleteRegistryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteRegistryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DeleteRegistryPolicyWithContext(ctx aws.Context, input *DeleteRegistryPolicyInput, opts ...request.Option) (*DeleteRegistryPolicyOutput, error) {
- req, out := c.DeleteRegistryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteRepository = "DeleteRepository"
-
-// DeleteRepositoryRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteRepository operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteRepository for more information on using the DeleteRepository
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DeleteRepositoryRequest method.
-// req, resp := client.DeleteRepositoryRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository
-func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) {
- op := &request.Operation{
- Name: opDeleteRepository,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteRepositoryInput{}
- }
-
- output = &DeleteRepositoryOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeleteRepository API operation for Amazon EC2 Container Registry.
-//
-// Deletes a repository. If the repository contains images, you must either
-// delete all images in the repository or use the force option to delete the
-// repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DeleteRepository for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - RepositoryNotEmptyException
-// The specified repository contains images. To delete a repository that contains
-// images, you must force the deletion with the force parameter.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository
-func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) {
- req, out := c.DeleteRepositoryRequest(input)
- return out, req.Send()
-}
-
-// DeleteRepositoryWithContext is the same as DeleteRepository with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteRepository for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DeleteRepositoryWithContext(ctx aws.Context, input *DeleteRepositoryInput, opts ...request.Option) (*DeleteRepositoryOutput, error) {
- req, out := c.DeleteRepositoryRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy"
-
-// DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteRepositoryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteRepositoryPolicy for more information on using the DeleteRepositoryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DeleteRepositoryPolicyRequest method.
-// req, resp := client.DeleteRepositoryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy
-func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) {
- op := &request.Operation{
- Name: opDeleteRepositoryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteRepositoryPolicyInput{}
- }
-
- output = &DeleteRepositoryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeleteRepositoryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Deletes the repository policy associated with the specified repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DeleteRepositoryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - RepositoryPolicyNotFoundException
-// The specified repository and registry combination does not have an associated
-// repository policy.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy
-func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) {
- req, out := c.DeleteRepositoryPolicyRequest(input)
- return out, req.Send()
-}
-
-// DeleteRepositoryPolicyWithContext is the same as DeleteRepositoryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteRepositoryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRepositoryPolicyInput, opts ...request.Option) (*DeleteRepositoryPolicyOutput, error) {
- req, out := c.DeleteRepositoryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeImageReplicationStatus = "DescribeImageReplicationStatus"
-
-// DescribeImageReplicationStatusRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeImageReplicationStatus operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeImageReplicationStatus for more information on using the DescribeImageReplicationStatus
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DescribeImageReplicationStatusRequest method.
-// req, resp := client.DescribeImageReplicationStatusRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageReplicationStatus
-func (c *ECR) DescribeImageReplicationStatusRequest(input *DescribeImageReplicationStatusInput) (req *request.Request, output *DescribeImageReplicationStatusOutput) {
- op := &request.Operation{
- Name: opDescribeImageReplicationStatus,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeImageReplicationStatusInput{}
- }
-
- output = &DescribeImageReplicationStatusOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeImageReplicationStatus API operation for Amazon EC2 Container Registry.
-//
-// Returns the replication status for a specified image.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribeImageReplicationStatus for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ImageNotFoundException
-// The image requested does not exist in the specified repository.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageReplicationStatus
-func (c *ECR) DescribeImageReplicationStatus(input *DescribeImageReplicationStatusInput) (*DescribeImageReplicationStatusOutput, error) {
- req, out := c.DescribeImageReplicationStatusRequest(input)
- return out, req.Send()
-}
-
-// DescribeImageReplicationStatusWithContext is the same as DescribeImageReplicationStatus with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeImageReplicationStatus for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeImageReplicationStatusWithContext(ctx aws.Context, input *DescribeImageReplicationStatusInput, opts ...request.Option) (*DescribeImageReplicationStatusOutput, error) {
- req, out := c.DescribeImageReplicationStatusRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeImageScanFindings = "DescribeImageScanFindings"
-
-// DescribeImageScanFindingsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeImageScanFindings operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeImageScanFindings for more information on using the DescribeImageScanFindings
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DescribeImageScanFindingsRequest method.
-// req, resp := client.DescribeImageScanFindingsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings
-func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsInput) (req *request.Request, output *DescribeImageScanFindingsOutput) {
- op := &request.Operation{
- Name: opDescribeImageScanFindings,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &DescribeImageScanFindingsInput{}
- }
-
- output = &DescribeImageScanFindingsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeImageScanFindings API operation for Amazon EC2 Container Registry.
-//
-// Returns the scan findings for the specified image.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribeImageScanFindings for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ImageNotFoundException
-// The image requested does not exist in the specified repository.
-//
-// - ScanNotFoundException
-// The specified image scan could not be found. Ensure that image scanning is
-// enabled on the repository and try again.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings
-func (c *ECR) DescribeImageScanFindings(input *DescribeImageScanFindingsInput) (*DescribeImageScanFindingsOutput, error) {
- req, out := c.DescribeImageScanFindingsRequest(input)
- return out, req.Send()
-}
-
-// DescribeImageScanFindingsWithContext is the same as DescribeImageScanFindings with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeImageScanFindings for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeImageScanFindingsWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.Option) (*DescribeImageScanFindingsOutput, error) {
- req, out := c.DescribeImageScanFindingsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// DescribeImageScanFindingsPages iterates over the pages of a DescribeImageScanFindings operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See DescribeImageScanFindings method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a DescribeImageScanFindings operation.
-// pageNum := 0
-// err := client.DescribeImageScanFindingsPages(params,
-// func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) DescribeImageScanFindingsPages(input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool) error {
- return c.DescribeImageScanFindingsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// DescribeImageScanFindingsPagesWithContext same as DescribeImageScanFindingsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeImageScanFindingsPagesWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *DescribeImageScanFindingsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeImageScanFindingsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*DescribeImageScanFindingsOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opDescribeImages = "DescribeImages"
-
-// DescribeImagesRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeImages operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeImages for more information on using the DescribeImages
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DescribeImagesRequest method.
-// req, resp := client.DescribeImagesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages
-func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) {
- op := &request.Operation{
- Name: opDescribeImages,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &DescribeImagesInput{}
- }
-
- output = &DescribeImagesOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeImages API operation for Amazon EC2 Container Registry.
-//
-// Returns metadata about the images in a repository.
-//
-// Beginning with Docker version 1.9, the Docker client compresses image layers
-// before pushing them to a V2 Docker registry. The output of the docker images
-// command shows the uncompressed image size, so it may return a larger image
-// size than the image sizes returned by DescribeImages.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribeImages for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ImageNotFoundException
-// The image requested does not exist in the specified repository.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages
-func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) {
- req, out := c.DescribeImagesRequest(input)
- return out, req.Send()
-}
-
-// DescribeImagesWithContext is the same as DescribeImages with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeImages for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) {
- req, out := c.DescribeImagesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// DescribeImagesPages iterates over the pages of a DescribeImages operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See DescribeImages method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a DescribeImages operation.
-// pageNum := 0
-// err := client.DescribeImagesPages(params,
-// func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error {
- return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// DescribeImagesPagesWithContext same as DescribeImagesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *DescribeImagesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeImagesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
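The paginated Describe* operations above also have *PagesWithContext forms, which combine the page callback with the same context handling. A small hypothetical sketch (repository name assumed, client constructed as in the first example) that tallies image details across all pages:

// Illustrative sketch only: iterate every DescribeImages page under the given
// context and count the returned image details.
func countImages(ctx aws.Context, svc *ecr.ECR) (int, error) {
	total := 0
	err := svc.DescribeImagesPagesWithContext(ctx,
		&ecr.DescribeImagesInput{RepositoryName: aws.String("example-repo")},
		func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
			total += len(page.ImageDetails)
			return true // keep requesting pages until lastPage is reached
		})
	return total, err
}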
-const opDescribePullThroughCacheRules = "DescribePullThroughCacheRules"
-
-// DescribePullThroughCacheRulesRequest generates a "aws/request.Request" representing the
-// client's request for the DescribePullThroughCacheRules operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribePullThroughCacheRules for more information on using the DescribePullThroughCacheRules
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DescribePullThroughCacheRulesRequest method.
-// req, resp := client.DescribePullThroughCacheRulesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribePullThroughCacheRules
-func (c *ECR) DescribePullThroughCacheRulesRequest(input *DescribePullThroughCacheRulesInput) (req *request.Request, output *DescribePullThroughCacheRulesOutput) {
- op := &request.Operation{
- Name: opDescribePullThroughCacheRules,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &DescribePullThroughCacheRulesInput{}
- }
-
- output = &DescribePullThroughCacheRulesOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribePullThroughCacheRules API operation for Amazon EC2 Container Registry.
-//
-// Returns the pull through cache rules for a registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribePullThroughCacheRules for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// - PullThroughCacheRuleNotFoundException
-// The pull through cache rule was not found. Specify a valid pull through cache
-// rule and try again.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribePullThroughCacheRules
-func (c *ECR) DescribePullThroughCacheRules(input *DescribePullThroughCacheRulesInput) (*DescribePullThroughCacheRulesOutput, error) {
- req, out := c.DescribePullThroughCacheRulesRequest(input)
- return out, req.Send()
-}
-
-// DescribePullThroughCacheRulesWithContext is the same as DescribePullThroughCacheRules with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribePullThroughCacheRules for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribePullThroughCacheRulesWithContext(ctx aws.Context, input *DescribePullThroughCacheRulesInput, opts ...request.Option) (*DescribePullThroughCacheRulesOutput, error) {
- req, out := c.DescribePullThroughCacheRulesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// DescribePullThroughCacheRulesPages iterates over the pages of a DescribePullThroughCacheRules operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See DescribePullThroughCacheRules method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a DescribePullThroughCacheRules operation.
-// pageNum := 0
-// err := client.DescribePullThroughCacheRulesPages(params,
-// func(page *ecr.DescribePullThroughCacheRulesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) DescribePullThroughCacheRulesPages(input *DescribePullThroughCacheRulesInput, fn func(*DescribePullThroughCacheRulesOutput, bool) bool) error {
- return c.DescribePullThroughCacheRulesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// DescribePullThroughCacheRulesPagesWithContext same as DescribePullThroughCacheRulesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribePullThroughCacheRulesPagesWithContext(ctx aws.Context, input *DescribePullThroughCacheRulesInput, fn func(*DescribePullThroughCacheRulesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *DescribePullThroughCacheRulesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribePullThroughCacheRulesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*DescribePullThroughCacheRulesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opDescribeRegistry = "DescribeRegistry"
-
-// DescribeRegistryRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeRegistry operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeRegistry for more information on using the DescribeRegistry
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-// // Example sending a request using the DescribeRegistryRequest method.
-// req, resp := client.DescribeRegistryRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRegistry
-func (c *ECR) DescribeRegistryRequest(input *DescribeRegistryInput) (req *request.Request, output *DescribeRegistryOutput) {
- op := &request.Operation{
- Name: opDescribeRegistry,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeRegistryInput{}
- }
-
- output = &DescribeRegistryOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeRegistry API operation for Amazon EC2 Container Registry.
-//
-// Describes the settings for a registry. The replication configuration for
-// a repository can be created or updated with the PutReplicationConfiguration
-// API action.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribeRegistry for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRegistry
-func (c *ECR) DescribeRegistry(input *DescribeRegistryInput) (*DescribeRegistryOutput, error) {
- req, out := c.DescribeRegistryRequest(input)
- return out, req.Send()
-}
-
-// DescribeRegistryWithContext is the same as DescribeRegistry with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeRegistry for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeRegistryWithContext(ctx aws.Context, input *DescribeRegistryInput, opts ...request.Option) (*DescribeRegistryOutput, error) {
- req, out := c.DescribeRegistryRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeRepositories = "DescribeRepositories"
-
-// DescribeRepositoriesRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeRepositories operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeRepositories for more information on using the DescribeRepositories
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeRepositoriesRequest method.
-// req, resp := client.DescribeRepositoriesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories
-func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) {
- op := &request.Operation{
- Name: opDescribeRepositories,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &DescribeRepositoriesInput{}
- }
-
- output = &DescribeRepositoriesOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeRepositories API operation for Amazon EC2 Container Registry.
-//
-// Describes image repositories in a registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation DescribeRepositories for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories
-func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) {
- req, out := c.DescribeRepositoriesRequest(input)
- return out, req.Send()
-}
-
-// DescribeRepositoriesWithContext is the same as DescribeRepositories with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeRepositories for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, opts ...request.Option) (*DescribeRepositoriesOutput, error) {
- req, out := c.DescribeRepositoriesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// DescribeRepositoriesPages iterates over the pages of a DescribeRepositories operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See DescribeRepositories method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a DescribeRepositories operation.
-// pageNum := 0
-// err := client.DescribeRepositoriesPages(params,
-// func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) DescribeRepositoriesPages(input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool) error {
- return c.DescribeRepositoriesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// DescribeRepositoriesPagesWithContext same as DescribeRepositoriesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) DescribeRepositoriesPagesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *DescribeRepositoriesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeRepositoriesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opGetAuthorizationToken = "GetAuthorizationToken"
-
-// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the
-// client's request for the GetAuthorizationToken operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetAuthorizationToken for more information on using the GetAuthorizationToken
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetAuthorizationTokenRequest method.
-// req, resp := client.GetAuthorizationTokenRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken
-func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) {
- op := &request.Operation{
- Name: opGetAuthorizationToken,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetAuthorizationTokenInput{}
- }
-
- output = &GetAuthorizationTokenOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetAuthorizationToken API operation for Amazon EC2 Container Registry.
-//
-// Retrieves an authorization token. An authorization token represents your
-// IAM authentication credentials and can be used to access any Amazon ECR registry
-// that your IAM principal has access to. The authorization token is valid for
-// 12 hours.
-//
-// The authorizationToken returned is a base64 encoded string that can be decoded
-// and used in a docker login command to authenticate to a registry. The CLI
-// offers an get-login-password command that simplifies the login process. For
-// more information, see Registry authentication (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetAuthorizationToken for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken
-func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) {
- req, out := c.GetAuthorizationTokenRequest(input)
- return out, req.Send()
-}
-
-// GetAuthorizationTokenWithContext is the same as GetAuthorizationToken with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetAuthorizationToken for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetAuthorizationTokenWithContext(ctx aws.Context, input *GetAuthorizationTokenInput, opts ...request.Option) (*GetAuthorizationTokenOutput, error) {
- req, out := c.GetAuthorizationTokenRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer"
-
-// GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the
-// client's request for the GetDownloadUrlForLayer operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetDownloadUrlForLayer for more information on using the GetDownloadUrlForLayer
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetDownloadUrlForLayerRequest method.
-// req, resp := client.GetDownloadUrlForLayerRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer
-func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) {
- op := &request.Operation{
- Name: opGetDownloadUrlForLayer,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetDownloadUrlForLayerInput{}
- }
-
- output = &GetDownloadUrlForLayerOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetDownloadUrlForLayer API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the pre-signed Amazon S3 download URL corresponding to an image
-// layer. You can only get URLs for image layers that are referenced in an image.
-//
-// When an image is pulled, the GetDownloadUrlForLayer API is called once per
-// image layer that is not already cached.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetDownloadUrlForLayer for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - LayersNotFoundException
-// The specified layers could not be found, or the specified layer is not valid
-// for this repository.
-//
-// - LayerInaccessibleException
-// The specified layer is not available because it is not associated with an
-// image. Unassociated image layers may be cleaned up at any time.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer
-func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) {
- req, out := c.GetDownloadUrlForLayerRequest(input)
- return out, req.Send()
-}
-
-// GetDownloadUrlForLayerWithContext is the same as GetDownloadUrlForLayer with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetDownloadUrlForLayer for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownloadUrlForLayerInput, opts ...request.Option) (*GetDownloadUrlForLayerOutput, error) {
- req, out := c.GetDownloadUrlForLayerRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetLifecyclePolicy = "GetLifecyclePolicy"
-
-// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the GetLifecyclePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetLifecyclePolicyRequest method.
-// req, resp := client.GetLifecyclePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
-func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) {
- op := &request.Operation{
- Name: opGetLifecyclePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetLifecyclePolicyInput{}
- }
-
- output = &GetLifecyclePolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetLifecyclePolicy API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the lifecycle policy for the specified repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetLifecyclePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - LifecyclePolicyNotFoundException
-// The lifecycle policy could not be found, and no policy is set to the repository.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
-func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) {
- req, out := c.GetLifecyclePolicyRequest(input)
- return out, req.Send()
-}
-
-// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetLifecyclePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) {
- req, out := c.GetLifecyclePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview"
-
-// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
-// client's request for the GetLifecyclePolicyPreview operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetLifecyclePolicyPreviewRequest method.
-// req, resp := client.GetLifecyclePolicyPreviewRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview
-func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) {
- op := &request.Operation{
- Name: opGetLifecyclePolicyPreview,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &GetLifecyclePolicyPreviewInput{}
- }
-
- output = &GetLifecyclePolicyPreviewOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the results of the lifecycle policy preview request for the specified
-// repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetLifecyclePolicyPreview for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - LifecyclePolicyPreviewNotFoundException
-// There is no dry run for this repository.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview
-func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) {
- req, out := c.GetLifecyclePolicyPreviewRequest(input)
- return out, req.Send()
-}
-
-// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetLifecyclePolicyPreview for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) {
- req, out := c.GetLifecyclePolicyPreviewRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// GetLifecyclePolicyPreviewPages iterates over the pages of a GetLifecyclePolicyPreview operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See GetLifecyclePolicyPreview method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a GetLifecyclePolicyPreview operation.
-// pageNum := 0
-// err := client.GetLifecyclePolicyPreviewPages(params,
-// func(page *ecr.GetLifecyclePolicyPreviewOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) GetLifecyclePolicyPreviewPages(input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool) error {
- return c.GetLifecyclePolicyPreviewPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// GetLifecyclePolicyPreviewPagesWithContext same as GetLifecyclePolicyPreviewPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetLifecyclePolicyPreviewPagesWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *GetLifecyclePolicyPreviewInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*GetLifecyclePolicyPreviewOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opGetRegistryPolicy = "GetRegistryPolicy"
-
-// GetRegistryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the GetRegistryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetRegistryPolicy for more information on using the GetRegistryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetRegistryPolicyRequest method.
-// req, resp := client.GetRegistryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRegistryPolicy
-func (c *ECR) GetRegistryPolicyRequest(input *GetRegistryPolicyInput) (req *request.Request, output *GetRegistryPolicyOutput) {
- op := &request.Operation{
- Name: opGetRegistryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetRegistryPolicyInput{}
- }
-
- output = &GetRegistryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetRegistryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the permissions policy for a registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetRegistryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RegistryPolicyNotFoundException
-// The registry doesn't have an associated registry policy.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRegistryPolicy
-func (c *ECR) GetRegistryPolicy(input *GetRegistryPolicyInput) (*GetRegistryPolicyOutput, error) {
- req, out := c.GetRegistryPolicyRequest(input)
- return out, req.Send()
-}
-
-// GetRegistryPolicyWithContext is the same as GetRegistryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetRegistryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetRegistryPolicyWithContext(ctx aws.Context, input *GetRegistryPolicyInput, opts ...request.Option) (*GetRegistryPolicyOutput, error) {
- req, out := c.GetRegistryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetRegistryScanningConfiguration = "GetRegistryScanningConfiguration"
-
-// GetRegistryScanningConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the GetRegistryScanningConfiguration operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetRegistryScanningConfiguration for more information on using the GetRegistryScanningConfiguration
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetRegistryScanningConfigurationRequest method.
-// req, resp := client.GetRegistryScanningConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRegistryScanningConfiguration
-func (c *ECR) GetRegistryScanningConfigurationRequest(input *GetRegistryScanningConfigurationInput) (req *request.Request, output *GetRegistryScanningConfigurationOutput) {
- op := &request.Operation{
- Name: opGetRegistryScanningConfiguration,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetRegistryScanningConfigurationInput{}
- }
-
- output = &GetRegistryScanningConfigurationOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetRegistryScanningConfiguration API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the scanning configuration for a registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetRegistryScanningConfiguration for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRegistryScanningConfiguration
-func (c *ECR) GetRegistryScanningConfiguration(input *GetRegistryScanningConfigurationInput) (*GetRegistryScanningConfigurationOutput, error) {
- req, out := c.GetRegistryScanningConfigurationRequest(input)
- return out, req.Send()
-}
-
-// GetRegistryScanningConfigurationWithContext is the same as GetRegistryScanningConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetRegistryScanningConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetRegistryScanningConfigurationWithContext(ctx aws.Context, input *GetRegistryScanningConfigurationInput, opts ...request.Option) (*GetRegistryScanningConfigurationOutput, error) {
- req, out := c.GetRegistryScanningConfigurationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetRepositoryPolicy = "GetRepositoryPolicy"
-
-// GetRepositoryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the GetRepositoryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetRepositoryPolicy for more information on using the GetRepositoryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetRepositoryPolicyRequest method.
-// req, resp := client.GetRepositoryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy
-func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) {
- op := &request.Operation{
- Name: opGetRepositoryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetRepositoryPolicyInput{}
- }
-
- output = &GetRepositoryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetRepositoryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Retrieves the repository policy for the specified repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation GetRepositoryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - RepositoryPolicyNotFoundException
-// The specified repository and registry combination does not have an associated
-// repository policy.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy
-func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) {
- req, out := c.GetRepositoryPolicyRequest(input)
- return out, req.Send()
-}
-
-// GetRepositoryPolicyWithContext is the same as GetRepositoryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetRepositoryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) GetRepositoryPolicyWithContext(ctx aws.Context, input *GetRepositoryPolicyInput, opts ...request.Option) (*GetRepositoryPolicyOutput, error) {
- req, out := c.GetRepositoryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opInitiateLayerUpload = "InitiateLayerUpload"
-
-// InitiateLayerUploadRequest generates a "aws/request.Request" representing the
-// client's request for the InitiateLayerUpload operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See InitiateLayerUpload for more information on using the InitiateLayerUpload
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the InitiateLayerUploadRequest method.
-// req, resp := client.InitiateLayerUploadRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload
-func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) {
- op := &request.Operation{
- Name: opInitiateLayerUpload,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &InitiateLayerUploadInput{}
- }
-
- output = &InitiateLayerUploadOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// InitiateLayerUpload API operation for Amazon EC2 Container Registry.
-//
-// Notifies Amazon ECR that you intend to upload an image layer.
-//
-// When an image is pushed, the InitiateLayerUpload API is called once per image
-// layer that has not already been uploaded. Whether or not an image layer has
-// been uploaded is determined by the BatchCheckLayerAvailability API action.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation InitiateLayerUpload for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload
-func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) {
- req, out := c.InitiateLayerUploadRequest(input)
- return out, req.Send()
-}
-
-// InitiateLayerUploadWithContext is the same as InitiateLayerUpload with the addition of
-// the ability to pass a context and additional request options.
-//
-// See InitiateLayerUpload for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) InitiateLayerUploadWithContext(ctx aws.Context, input *InitiateLayerUploadInput, opts ...request.Option) (*InitiateLayerUploadOutput, error) {
- req, out := c.InitiateLayerUploadRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListImages = "ListImages"
-
-// ListImagesRequest generates a "aws/request.Request" representing the
-// client's request for the ListImages operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListImages for more information on using the ListImages
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListImagesRequest method.
-// req, resp := client.ListImagesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages
-func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) {
- op := &request.Operation{
- Name: opListImages,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListImagesInput{}
- }
-
- output = &ListImagesOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ListImages API operation for Amazon EC2 Container Registry.
-//
-// Lists all the image IDs for the specified repository.
-//
-// You can filter images based on whether or not they are tagged by using the
-// tagStatus filter and specifying either TAGGED, UNTAGGED or ANY. For example,
-// you can filter your results to return only UNTAGGED images and then pipe
-// that result to a BatchDeleteImage operation to delete them. Or, you can filter
-// your results to return only TAGGED images to list all of the tags in your
-// repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation ListImages for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages
-func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) {
- req, out := c.ListImagesRequest(input)
- return out, req.Send()
-}
-
-// ListImagesWithContext is the same as ListImages with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListImages for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opts ...request.Option) (*ListImagesOutput, error) {
- req, out := c.ListImagesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListImagesPages iterates over the pages of a ListImages operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListImages method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListImages operation.
-// pageNum := 0
-// err := client.ListImagesPages(params,
-// func(page *ecr.ListImagesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *ECR) ListImagesPages(input *ListImagesInput, fn func(*ListImagesOutput, bool) bool) error {
- return c.ListImagesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListImagesPagesWithContext same as ListImagesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) ListImagesPagesWithContext(ctx aws.Context, input *ListImagesInput, fn func(*ListImagesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListImagesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListImagesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListImagesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListTagsForResource = "ListTagsForResource"
-
-// ListTagsForResourceRequest generates a "aws/request.Request" representing the
-// client's request for the ListTagsForResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListTagsForResource for more information on using the ListTagsForResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListTagsForResourceRequest method.
-// req, resp := client.ListTagsForResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource
-func (c *ECR) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
- op := &request.Operation{
- Name: opListTagsForResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ListTagsForResourceInput{}
- }
-
- output = &ListTagsForResourceOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ListTagsForResource API operation for Amazon EC2 Container Registry.
-//
-// List the tags for an Amazon ECR resource.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation ListTagsForResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource
-func (c *ECR) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
- req, out := c.ListTagsForResourceRequest(input)
- return out, req.Send()
-}
-
-// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListTagsForResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
- req, out := c.ListTagsForResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutImage = "PutImage"
-
-// PutImageRequest generates a "aws/request.Request" representing the
-// client's request for the PutImage operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutImage for more information on using the PutImage
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutImageRequest method.
-// req, resp := client.PutImageRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage
-func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) {
- op := &request.Operation{
- Name: opPutImage,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutImageInput{}
- }
-
- output = &PutImageOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutImage API operation for Amazon EC2 Container Registry.
-//
-// Creates or updates the image manifest and tags associated with an image.
-//
-// When an image is pushed and all new image layers have been uploaded, the
-// PutImage API is called once to create or update the image manifest and the
-// tags associated with the image.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutImage for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ImageAlreadyExistsException
-// The specified image has already been pushed, and there were no changes to
-// the manifest or image tag after the last push.
-//
-// - LayersNotFoundException
-// The specified layers could not be found, or the specified layer is not valid
-// for this repository.
-//
-// - ReferencedImagesNotFoundException
-// The manifest list is referencing an image that does not exist.
-//
-// - LimitExceededException
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// - ImageTagAlreadyExistsException
-// The specified image is tagged with a tag that already exists. The repository
-// is configured for tag immutability.
-//
-// - ImageDigestDoesNotMatchException
-// The specified image digest does not match the digest that Amazon ECR calculated
-// for the image.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage
-func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) {
- req, out := c.PutImageRequest(input)
- return out, req.Send()
-}
-
-// PutImageWithContext is the same as PutImage with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutImage for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts ...request.Option) (*PutImageOutput, error) {
- req, out := c.PutImageRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutImageScanningConfiguration = "PutImageScanningConfiguration"
-
-// PutImageScanningConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutImageScanningConfiguration operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutImageScanningConfiguration for more information on using the PutImageScanningConfiguration
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutImageScanningConfigurationRequest method.
-// req, resp := client.PutImageScanningConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration
-func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfigurationInput) (req *request.Request, output *PutImageScanningConfigurationOutput) {
- op := &request.Operation{
- Name: opPutImageScanningConfiguration,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutImageScanningConfigurationInput{}
- }
-
- output = &PutImageScanningConfigurationOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutImageScanningConfiguration API operation for Amazon EC2 Container Registry.
-//
-// The PutImageScanningConfiguration API is being deprecated, in favor of specifying
-// the image scanning configuration at the registry level. For more information,
-// see PutRegistryScanningConfiguration.
-//
-// Updates the image scanning configuration for the specified repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutImageScanningConfiguration for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration
-func (c *ECR) PutImageScanningConfiguration(input *PutImageScanningConfigurationInput) (*PutImageScanningConfigurationOutput, error) {
- req, out := c.PutImageScanningConfigurationRequest(input)
- return out, req.Send()
-}
-
-// PutImageScanningConfigurationWithContext is the same as PutImageScanningConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutImageScanningConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutImageScanningConfigurationWithContext(ctx aws.Context, input *PutImageScanningConfigurationInput, opts ...request.Option) (*PutImageScanningConfigurationOutput, error) {
- req, out := c.PutImageScanningConfigurationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutImageTagMutability = "PutImageTagMutability"
-
-// PutImageTagMutabilityRequest generates a "aws/request.Request" representing the
-// client's request for the PutImageTagMutability operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutImageTagMutability for more information on using the PutImageTagMutability
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutImageTagMutabilityRequest method.
-// req, resp := client.PutImageTagMutabilityRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability
-func (c *ECR) PutImageTagMutabilityRequest(input *PutImageTagMutabilityInput) (req *request.Request, output *PutImageTagMutabilityOutput) {
- op := &request.Operation{
- Name: opPutImageTagMutability,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutImageTagMutabilityInput{}
- }
-
- output = &PutImageTagMutabilityOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutImageTagMutability API operation for Amazon EC2 Container Registry.
-//
-// Updates the image tag mutability settings for the specified repository. For
-// more information, see Image tag mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutImageTagMutability for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability
-func (c *ECR) PutImageTagMutability(input *PutImageTagMutabilityInput) (*PutImageTagMutabilityOutput, error) {
- req, out := c.PutImageTagMutabilityRequest(input)
- return out, req.Send()
-}
-
-// PutImageTagMutabilityWithContext is the same as PutImageTagMutability with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutImageTagMutability for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutImageTagMutabilityWithContext(ctx aws.Context, input *PutImageTagMutabilityInput, opts ...request.Option) (*PutImageTagMutabilityOutput, error) {
- req, out := c.PutImageTagMutabilityRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
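
A sketch of the call shape for this operation, assuming the imports and the *ecr.ECR client from the first sketch and a placeholder repository name:

// makeTagsImmutable turns on tag immutability for one repository.
// ecr.ImageTagMutabilityImmutable is the SDK constant for "IMMUTABLE";
// pass ecr.ImageTagMutabilityMutable to revert.
func makeTagsImmutable(client *ecr.ECR, repo string) error {
	_, err := client.PutImageTagMutability(&ecr.PutImageTagMutabilityInput{
		RepositoryName:     aws.String(repo),
		ImageTagMutability: aws.String(ecr.ImageTagMutabilityImmutable),
	})
	return err
}
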
-
-const opPutLifecyclePolicy = "PutLifecyclePolicy"
-
-// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the PutLifecyclePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutLifecyclePolicyRequest method.
-// req, resp := client.PutLifecyclePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy
-func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) {
- op := &request.Operation{
- Name: opPutLifecyclePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutLifecyclePolicyInput{}
- }
-
- output = &PutLifecyclePolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutLifecyclePolicy API operation for Amazon EC2 Container Registry.
-//
-// Creates or updates the lifecycle policy for the specified repository. For
-// more information, see Lifecycle policy template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutLifecyclePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy
-func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) {
- req, out := c.PutLifecyclePolicyRequest(input)
- return out, req.Send()
-}
-
-// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutLifecyclePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) {
- req, out := c.PutLifecyclePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
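
A sketch under the same assumptions (client and imports as in the first sketch); the policy document is an illustrative example that expires untagged images after 14 days, not a recommended policy:

// putUntaggedExpiryPolicy attaches a simple lifecycle policy to repo.
func putUntaggedExpiryPolicy(client *ecr.ECR, repo string) error {
	const policy = `{
	  "rules": [{
	    "rulePriority": 1,
	    "description": "expire untagged images after 14 days",
	    "selection": {
	      "tagStatus": "untagged",
	      "countType": "sinceImagePushed",
	      "countUnit": "days",
	      "countNumber": 14
	    },
	    "action": {"type": "expire"}
	  }]
	}`
	_, err := client.PutLifecyclePolicy(&ecr.PutLifecyclePolicyInput{
		RepositoryName:      aws.String(repo),
		LifecyclePolicyText: aws.String(policy),
	})
	return err
}
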
-
-const opPutRegistryPolicy = "PutRegistryPolicy"
-
-// PutRegistryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the PutRegistryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutRegistryPolicy for more information on using the PutRegistryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutRegistryPolicyRequest method.
-// req, resp := client.PutRegistryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutRegistryPolicy
-func (c *ECR) PutRegistryPolicyRequest(input *PutRegistryPolicyInput) (req *request.Request, output *PutRegistryPolicyOutput) {
- op := &request.Operation{
- Name: opPutRegistryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutRegistryPolicyInput{}
- }
-
- output = &PutRegistryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutRegistryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Creates or updates the permissions policy for your registry.
-//
-// A registry policy is used to specify permissions for another Amazon Web Services
-// account and is used when configuring cross-account replication. For more
-// information, see Registry permissions (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutRegistryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutRegistryPolicy
-func (c *ECR) PutRegistryPolicy(input *PutRegistryPolicyInput) (*PutRegistryPolicyOutput, error) {
- req, out := c.PutRegistryPolicyRequest(input)
- return out, req.Send()
-}
-
-// PutRegistryPolicyWithContext is the same as PutRegistryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutRegistryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutRegistryPolicyWithContext(ctx aws.Context, input *PutRegistryPolicyInput, opts ...request.Option) (*PutRegistryPolicyOutput, error) {
- req, out := c.PutRegistryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
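
A sketch of setting a registry policy that lets another account replicate into this registry, under the same client assumptions. The account IDs, region, and action list are illustrative placeholders drawn from the cross-account replication documentation, not values from this repository:

// allowCrossAccountReplication grants a source account permission to
// replicate images into this registry (placeholder IDs throughout).
func allowCrossAccountReplication(client *ecr.ECR) error {
	const policy = `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowReplication",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": ["ecr:CreateRepository", "ecr:ReplicateImage"],
	    "Resource": "arn:aws:ecr:us-east-1:444455556666:repository/*"
	  }]
	}`
	_, err := client.PutRegistryPolicy(&ecr.PutRegistryPolicyInput{
		PolicyText: aws.String(policy),
	})
	return err
}
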
-
-const opPutRegistryScanningConfiguration = "PutRegistryScanningConfiguration"
-
-// PutRegistryScanningConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutRegistryScanningConfiguration operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutRegistryScanningConfiguration for more information on using the PutRegistryScanningConfiguration
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutRegistryScanningConfigurationRequest method.
-// req, resp := client.PutRegistryScanningConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutRegistryScanningConfiguration
-func (c *ECR) PutRegistryScanningConfigurationRequest(input *PutRegistryScanningConfigurationInput) (req *request.Request, output *PutRegistryScanningConfigurationOutput) {
- op := &request.Operation{
- Name: opPutRegistryScanningConfiguration,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutRegistryScanningConfigurationInput{}
- }
-
- output = &PutRegistryScanningConfigurationOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutRegistryScanningConfiguration API operation for Amazon EC2 Container Registry.
-//
-// Creates or updates the scanning configuration for your private registry.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutRegistryScanningConfiguration for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutRegistryScanningConfiguration
-func (c *ECR) PutRegistryScanningConfiguration(input *PutRegistryScanningConfigurationInput) (*PutRegistryScanningConfigurationOutput, error) {
- req, out := c.PutRegistryScanningConfigurationRequest(input)
- return out, req.Send()
-}
-
-// PutRegistryScanningConfigurationWithContext is the same as PutRegistryScanningConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutRegistryScanningConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutRegistryScanningConfigurationWithContext(ctx aws.Context, input *PutRegistryScanningConfigurationInput, opts ...request.Option) (*PutRegistryScanningConfigurationOutput, error) {
- req, out := c.PutRegistryScanningConfigurationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
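
A sketch of switching the registry to enhanced scanning with a scan-on-push rule, same client assumptions as above. The string values are the documented wire values; treat the exact rule and filter field names as an assumption to verify against the SDK version in use:

// enableEnhancedScanning turns on enhanced scanning and scans every
// repository ("*" wildcard) on push.
func enableEnhancedScanning(client *ecr.ECR) error {
	_, err := client.PutRegistryScanningConfiguration(&ecr.PutRegistryScanningConfigurationInput{
		ScanType: aws.String("ENHANCED"),
		Rules: []*ecr.RegistryScanningRule{{
			ScanFrequency: aws.String("SCAN_ON_PUSH"),
			RepositoryFilters: []*ecr.ScanningRepositoryFilter{{
				Filter:     aws.String("*"),
				FilterType: aws.String("WILDCARD"),
			}},
		}},
	})
	return err
}
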
-
-const opPutReplicationConfiguration = "PutReplicationConfiguration"
-
-// PutReplicationConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutReplicationConfiguration operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutReplicationConfiguration for more information on using the PutReplicationConfiguration
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutReplicationConfigurationRequest method.
-// req, resp := client.PutReplicationConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutReplicationConfiguration
-func (c *ECR) PutReplicationConfigurationRequest(input *PutReplicationConfigurationInput) (req *request.Request, output *PutReplicationConfigurationOutput) {
- op := &request.Operation{
- Name: opPutReplicationConfiguration,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutReplicationConfigurationInput{}
- }
-
- output = &PutReplicationConfigurationOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// PutReplicationConfiguration API operation for Amazon EC2 Container Registry.
-//
-// Creates or updates the replication configuration for a registry. The existing
-// replication configuration for a repository can be retrieved with the DescribeRegistry
-// replication configuration for a registry can be retrieved with the DescribeRegistry
-// API action. The first time the PutReplicationConfiguration API is called,
-// a service-linked IAM role is created in your account for the replication
-// process. For more information, see Using service-linked roles for Amazon
-// ECR (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// When configuring cross-account replication, the destination account must
-// grant the source account permission to replicate. This permission is controlled
-// using a registry permissions policy. For more information, see PutRegistryPolicy.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation PutReplicationConfiguration for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutReplicationConfiguration
-func (c *ECR) PutReplicationConfiguration(input *PutReplicationConfigurationInput) (*PutReplicationConfigurationOutput, error) {
- req, out := c.PutReplicationConfigurationRequest(input)
- return out, req.Send()
-}
-
-// PutReplicationConfigurationWithContext is the same as PutReplicationConfiguration with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutReplicationConfiguration for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) PutReplicationConfigurationWithContext(ctx aws.Context, input *PutReplicationConfigurationInput, opts ...request.Option) (*PutReplicationConfigurationOutput, error) {
- req, out := c.PutReplicationConfigurationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
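
A sketch of a one-rule replication configuration that copies every pushed image to another region, same client assumptions; the region and destination registry ID are placeholders:

// replicateToSecondRegion replicates all pushes to us-west-2 in the
// destination registry (placeholder account ID).
func replicateToSecondRegion(client *ecr.ECR) error {
	_, err := client.PutReplicationConfiguration(&ecr.PutReplicationConfigurationInput{
		ReplicationConfiguration: &ecr.ReplicationConfiguration{
			Rules: []*ecr.ReplicationRule{{
				Destinations: []*ecr.ReplicationDestination{{
					Region:     aws.String("us-west-2"),
					RegistryId: aws.String("123456789012"),
				}},
			}},
		},
	})
	return err
}
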
-
-const opSetRepositoryPolicy = "SetRepositoryPolicy"
-
-// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the SetRepositoryPolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the SetRepositoryPolicyRequest method.
-// req, resp := client.SetRepositoryPolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
-func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) {
- op := &request.Operation{
- Name: opSetRepositoryPolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &SetRepositoryPolicyInput{}
- }
-
- output = &SetRepositoryPolicyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// SetRepositoryPolicy API operation for Amazon EC2 Container Registry.
-//
-// Applies a repository policy to the specified repository to control access
-// permissions. For more information, see Amazon ECR Repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation SetRepositoryPolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
-func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) {
- req, out := c.SetRepositoryPolicyRequest(input)
- return out, req.Send()
-}
-
-// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See SetRepositoryPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) {
- req, out := c.SetRepositoryPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
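
A sketch of granting another account pull-only access to one repository, same client assumptions; the principal and action list are illustrative:

// allowPullFromAccount lets the named account pull images from repo.
func allowPullFromAccount(client *ecr.ECR, repo string) error {
	const policy = `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowPull",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": [
	      "ecr:GetDownloadUrlForLayer",
	      "ecr:BatchGetImage",
	      "ecr:BatchCheckLayerAvailability"
	    ]
	  }]
	}`
	_, err := client.SetRepositoryPolicy(&ecr.SetRepositoryPolicyInput{
		RepositoryName: aws.String(repo),
		PolicyText:     aws.String(policy),
	})
	return err
}
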
-
-const opStartImageScan = "StartImageScan"
-
-// StartImageScanRequest generates a "aws/request.Request" representing the
-// client's request for the StartImageScan operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See StartImageScan for more information on using the StartImageScan
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the StartImageScanRequest method.
-// req, resp := client.StartImageScanRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan
-func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Request, output *StartImageScanOutput) {
- op := &request.Operation{
- Name: opStartImageScan,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &StartImageScanInput{}
- }
-
- output = &StartImageScanOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// StartImageScan API operation for Amazon EC2 Container Registry.
-//
-// Starts an image vulnerability scan. An image scan can only be started once
-// per 24 hours on an individual image; this limit includes any scan triggered
-// when the image was initially pushed. For more information, see Image scanning (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation StartImageScan for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - UnsupportedImageTypeException
-// The image is of a type that cannot be scanned.
-//
-// - LimitExceededException
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ImageNotFoundException
-// The image requested does not exist in the specified repository.
-//
-// - ValidationException
-// There was an exception validating this request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan
-func (c *ECR) StartImageScan(input *StartImageScanInput) (*StartImageScanOutput, error) {
- req, out := c.StartImageScanRequest(input)
- return out, req.Send()
-}
-
-// StartImageScanWithContext is the same as StartImageScan with the addition of
-// the ability to pass a context and additional request options.
-//
-// See StartImageScan for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) StartImageScanWithContext(ctx aws.Context, input *StartImageScanInput, opts ...request.Option) (*StartImageScanOutput, error) {
- req, out := c.StartImageScanRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
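
A sketch of kicking off a scan for one tag, same client assumptions, with "latest" as a placeholder tag; DescribeImageScanFindings (defined elsewhere in this file) is the companion call for reading the results:

// scanTag starts a vulnerability scan for repo:tag. ECR allows one
// scan per image per 24 hours, so a recent scan surfaces as a
// LimitExceededException.
func scanTag(client *ecr.ECR, repo, tag string) error {
	_, err := client.StartImageScan(&ecr.StartImageScanInput{
		RepositoryName: aws.String(repo),
		ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String(tag)},
	})
	return err
}
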
-
-const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview"
-
-// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
-// client's request for the StartLifecyclePolicyPreview operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the StartLifecyclePolicyPreviewRequest method.
-// req, resp := client.StartLifecyclePolicyPreviewRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview
-func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) {
- op := &request.Operation{
- Name: opStartLifecyclePolicyPreview,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &StartLifecyclePolicyPreviewInput{}
- }
-
- output = &StartLifecyclePolicyPreviewOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry.
-//
-// Starts a preview of a lifecycle policy for the specified repository. This
-// allows you to see the results before associating the lifecycle policy with
-// the repository.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation StartLifecyclePolicyPreview for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - LifecyclePolicyNotFoundException
-// The lifecycle policy could not be found, and no policy is set to the repository.
-//
-// - LifecyclePolicyPreviewInProgressException
-// The previous lifecycle policy preview request has not completed. Wait and
-// try again.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview
-func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) {
- req, out := c.StartLifecyclePolicyPreviewRequest(input)
- return out, req.Send()
-}
-
-// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of
-// the ability to pass a context and additional request options.
-//
-// See StartLifecyclePolicyPreview for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) {
- req, out := c.StartLifecyclePolicyPreviewRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
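
A sketch of previewing a candidate lifecycle policy before attaching it, same client assumptions; policyJSON is whatever document you would later pass to PutLifecyclePolicy, and results are read back with GetLifecyclePolicyPreview:

// previewPolicy starts a dry run of policyJSON against repo; poll
// GetLifecyclePolicyPreview afterwards for the affected images.
func previewPolicy(client *ecr.ECR, repo, policyJSON string) error {
	_, err := client.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
		RepositoryName:      aws.String(repo),
		LifecyclePolicyText: aws.String(policyJSON),
	})
	return err
}
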
-
-const opTagResource = "TagResource"
-
-// TagResourceRequest generates a "aws/request.Request" representing the
-// client's request for the TagResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See TagResource for more information on using the TagResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the TagResourceRequest method.
-// req, resp := client.TagResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource
-func (c *ECR) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
- op := &request.Operation{
- Name: opTagResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &TagResourceInput{}
- }
-
- output = &TagResourceOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// TagResource API operation for Amazon EC2 Container Registry.
-//
-// Adds specified tags to a resource with the specified ARN. Existing tags on
-// a resource are not changed if they are not specified in the request parameters.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation TagResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - InvalidTagParameterException
-// An invalid parameter has been specified. Tag keys can have a maximum character
-// length of 128 characters, and tag values can have a maximum length of 256
-// characters.
-//
-// - TooManyTagsException
-// The list of tags on the repository is over the limit. The maximum number
-// of tags that can be applied to a repository is 50.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource
-func (c *ECR) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
- req, out := c.TagResourceRequest(input)
- return out, req.Send()
-}
-
-// TagResourceWithContext is the same as TagResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See TagResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
- req, out := c.TagResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
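
A sketch covering this operation together with UntagResource (defined next), same client assumptions; the repository ARN, tag key, and value are placeholders:

// setTeamTag tags a repository (by ARN) and shows the matching untag
// call for the same key.
func setTeamTag(client *ecr.ECR) error {
	arn := aws.String("arn:aws:ecr:us-east-1:123456789012:repository/my-repo")
	if _, err := client.TagResource(&ecr.TagResourceInput{
		ResourceArn: arn,
		Tags:        []*ecr.Tag{{Key: aws.String("team"), Value: aws.String("platform")}},
	}); err != nil {
		return err
	}
	// Removing the tag again uses UntagResource with just the key.
	_, err := client.UntagResource(&ecr.UntagResourceInput{
		ResourceArn: arn,
		TagKeys:     []*string{aws.String("team")},
	})
	return err
}
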
-
-const opUntagResource = "UntagResource"
-
-// UntagResourceRequest generates a "aws/request.Request" representing the
-// client's request for the UntagResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UntagResource for more information on using the UntagResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UntagResourceRequest method.
-// req, resp := client.UntagResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource
-func (c *ECR) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
- op := &request.Operation{
- Name: opUntagResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UntagResourceInput{}
- }
-
- output = &UntagResourceOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// UntagResource API operation for Amazon EC2 Container Registry.
-//
-// Deletes specified tags from a resource.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation UntagResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - InvalidTagParameterException
-// An invalid parameter has been specified. Tag keys can have a maximum character
-// length of 128 characters, and tag values can have a maximum length of 256
-// characters.
-//
-// - TooManyTagsException
-// The list of tags on the repository is over the limit. The maximum number
-// of tags that can be applied to a repository is 50.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource
-func (c *ECR) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
- req, out := c.UntagResourceRequest(input)
- return out, req.Send()
-}
-
-// UntagResourceWithContext is the same as UntagResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UntagResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
- req, out := c.UntagResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUploadLayerPart = "UploadLayerPart"
-
-// UploadLayerPartRequest generates a "aws/request.Request" representing the
-// client's request for the UploadLayerPart operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UploadLayerPart for more information on using the UploadLayerPart
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UploadLayerPartRequest method.
-// req, resp := client.UploadLayerPartRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
-func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) {
- op := &request.Operation{
- Name: opUploadLayerPart,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UploadLayerPartInput{}
- }
-
- output = &UploadLayerPartOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// UploadLayerPart API operation for Amazon EC2 Container Registry.
-//
-// Uploads an image layer part to Amazon ECR.
-//
-// When an image is pushed, each new image layer is uploaded in parts. The maximum
-// size of each image layer part can be 20971520 bytes (about 20 MB). The
-// UploadLayerPart API is called once for each new image layer part.
-//
-// This operation is used by the Amazon ECR proxy and is not generally used
-// by customers for pulling and pushing images. In most cases, you should use
-// the docker CLI to pull, tag, and push images.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation UploadLayerPart for usage and error information.
-//
-// Returned Error Types:
-//
-// - ServerException
-// These errors are usually caused by a server-side issue.
-//
-// - InvalidParameterException
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-//
-// - InvalidLayerPartException
-// The layer part size is not valid, or the first byte specified is not consecutive
-// to the last byte of a previous layer part upload.
-//
-// - RepositoryNotFoundException
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-//
-// - UploadNotFoundException
-// The upload could not be found, or the specified upload ID is not valid for
-// this repository.
-//
-// - LimitExceededException
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-//
-// - KmsException
-// The operation failed due to a KMS exception.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
-func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) {
- req, out := c.UploadLayerPartRequest(input)
- return out, req.Send()
-}
-
-// UploadLayerPartWithContext is the same as UploadLayerPart with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UploadLayerPart for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPartInput, opts ...request.Option) (*UploadLayerPartOutput, error) {
- req, out := c.UploadLayerPartRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// This data type is used in the ImageScanFinding data type.
-type Attribute struct {
- _ struct{} `type:"structure"`
-
- // The attribute key.
- //
- // Key is a required field
- Key *string `locationName:"key" min:"1" type:"string" required:"true"`
-
- // The value assigned to the attribute key.
- Value *string `locationName:"value" min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Attribute) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Attribute) GoString() string {
- return s.String()
-}
-
-// SetKey sets the Key field's value.
-func (s *Attribute) SetKey(v string) *Attribute {
- s.Key = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *Attribute) SetValue(v string) *Attribute {
- s.Value = &v
- return s
-}
-
-// An object representing authorization data for an Amazon ECR registry.
-type AuthorizationData struct {
- _ struct{} `type:"structure"`
-
- // A base64-encoded string that contains authorization data for the specified
- // Amazon ECR registry. When the string is decoded, it is presented in the format
- // user:password for private registry authentication using docker login.
- AuthorizationToken *string `locationName:"authorizationToken" type:"string"`
-
- // The Unix time in seconds and milliseconds when the authorization token expires.
- // Authorization tokens are valid for 12 hours.
- ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"`
-
- // The registry URL to use for this authorization token in a docker login command.
- // The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com.
-// For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.
- ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AuthorizationData) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AuthorizationData) GoString() string {
- return s.String()
-}
-
-// SetAuthorizationToken sets the AuthorizationToken field's value.
-func (s *AuthorizationData) SetAuthorizationToken(v string) *AuthorizationData {
- s.AuthorizationToken = &v
- return s
-}
-
-// SetExpiresAt sets the ExpiresAt field's value.
-func (s *AuthorizationData) SetExpiresAt(v time.Time) *AuthorizationData {
- s.ExpiresAt = &v
- return s
-}
-
-// SetProxyEndpoint sets the ProxyEndpoint field's value.
-func (s *AuthorizationData) SetProxyEndpoint(v string) *AuthorizationData {
- s.ProxyEndpoint = &v
- return s
-}
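
GetAuthorizationToken (defined earlier in this file) returns this structure; a sketch of turning it into docker-login credentials follows, with the usual base64 decode and user:password split. It assumes the client from the first sketch plus the standard-library encoding/base64, fmt, and strings imports:

// dockerCredentials fetches a registry token and splits it into the
// user/password pair expected by `docker login`.
func dockerCredentials(client *ecr.ECR) (user, password, endpoint string, err error) {
	out, err := client.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
	if err != nil {
		return "", "", "", err
	}
	if len(out.AuthorizationData) == 0 {
		return "", "", "", fmt.Errorf("no authorization data returned")
	}
	data := out.AuthorizationData[0]
	decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
	if err != nil {
		return "", "", "", err
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("unexpected token format")
	}
	return parts[0], parts[1], aws.StringValue(data.ProxyEndpoint), nil
}
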
-
-// The image details of the Amazon ECR container image.
-type AwsEcrContainerImageDetails struct {
- _ struct{} `type:"structure"`
-
- // The architecture of the Amazon ECR container image.
- Architecture *string `locationName:"architecture" type:"string"`
-
- // The image author of the Amazon ECR container image.
- Author *string `locationName:"author" type:"string"`
-
- // The image hash of the Amazon ECR container image.
- ImageHash *string `locationName:"imageHash" type:"string"`
-
- // The image tags attached to the Amazon ECR container image.
- ImageTags []*string `locationName:"imageTags" type:"list"`
-
- // The platform of the Amazon ECR container image.
- Platform *string `locationName:"platform" type:"string"`
-
- // The date and time the Amazon ECR container image was pushed.
- PushedAt *time.Time `locationName:"pushedAt" type:"timestamp"`
-
- // The registry the Amazon ECR container image belongs to.
- Registry *string `locationName:"registry" type:"string"`
-
- // The name of the repository the Amazon ECR container image resides in.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AwsEcrContainerImageDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AwsEcrContainerImageDetails) GoString() string {
- return s.String()
-}
-
-// SetArchitecture sets the Architecture field's value.
-func (s *AwsEcrContainerImageDetails) SetArchitecture(v string) *AwsEcrContainerImageDetails {
- s.Architecture = &v
- return s
-}
-
-// SetAuthor sets the Author field's value.
-func (s *AwsEcrContainerImageDetails) SetAuthor(v string) *AwsEcrContainerImageDetails {
- s.Author = &v
- return s
-}
-
-// SetImageHash sets the ImageHash field's value.
-func (s *AwsEcrContainerImageDetails) SetImageHash(v string) *AwsEcrContainerImageDetails {
- s.ImageHash = &v
- return s
-}
-
-// SetImageTags sets the ImageTags field's value.
-func (s *AwsEcrContainerImageDetails) SetImageTags(v []*string) *AwsEcrContainerImageDetails {
- s.ImageTags = v
- return s
-}
-
-// SetPlatform sets the Platform field's value.
-func (s *AwsEcrContainerImageDetails) SetPlatform(v string) *AwsEcrContainerImageDetails {
- s.Platform = &v
- return s
-}
-
-// SetPushedAt sets the PushedAt field's value.
-func (s *AwsEcrContainerImageDetails) SetPushedAt(v time.Time) *AwsEcrContainerImageDetails {
- s.PushedAt = &v
- return s
-}
-
-// SetRegistry sets the Registry field's value.
-func (s *AwsEcrContainerImageDetails) SetRegistry(v string) *AwsEcrContainerImageDetails {
- s.Registry = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *AwsEcrContainerImageDetails) SetRepositoryName(v string) *AwsEcrContainerImageDetails {
- s.RepositoryName = &v
- return s
-}
-
-type BatchCheckLayerAvailabilityInput struct {
- _ struct{} `type:"structure"`
-
- // The digests of the image layers to check.
- //
- // LayerDigests is a required field
- LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the image layers to check. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository that is associated with the image layers to check.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchCheckLayerAvailabilityInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchCheckLayerAvailabilityInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchCheckLayerAvailabilityInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchCheckLayerAvailabilityInput"}
- if s.LayerDigests == nil {
- invalidParams.Add(request.NewErrParamRequired("LayerDigests"))
- }
- if s.LayerDigests != nil && len(s.LayerDigests) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLayerDigests sets the LayerDigests field's value.
-func (s *BatchCheckLayerAvailabilityInput) SetLayerDigests(v []*string) *BatchCheckLayerAvailabilityInput {
- s.LayerDigests = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *BatchCheckLayerAvailabilityInput) SetRegistryId(v string) *BatchCheckLayerAvailabilityInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *BatchCheckLayerAvailabilityInput) SetRepositoryName(v string) *BatchCheckLayerAvailabilityInput {
- s.RepositoryName = &v
- return s
-}
-
-type BatchCheckLayerAvailabilityOutput struct {
- _ struct{} `type:"structure"`
-
- // Any failures associated with the call.
- Failures []*LayerFailure `locationName:"failures" type:"list"`
-
- // A list of image layer objects corresponding to the image layer references
- // in the request.
- Layers []*Layer `locationName:"layers" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchCheckLayerAvailabilityOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchCheckLayerAvailabilityOutput) GoString() string {
- return s.String()
-}
-
-// SetFailures sets the Failures field's value.
-func (s *BatchCheckLayerAvailabilityOutput) SetFailures(v []*LayerFailure) *BatchCheckLayerAvailabilityOutput {
- s.Failures = v
- return s
-}
-
-// SetLayers sets the Layers field's value.
-func (s *BatchCheckLayerAvailabilityOutput) SetLayers(v []*Layer) *BatchCheckLayerAvailabilityOutput {
- s.Layers = v
- return s
-}
-
-// Deletes specified images within a specified repository. Images are specified
-// with either the imageTag or imageDigest.
-type BatchDeleteImageInput struct {
- _ struct{} `type:"structure"`
-
- // A list of image ID references that correspond to images to delete. The format
- // of the imageIds reference is imageTag=tag or imageDigest=digest.
- //
- // ImageIds is a required field
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the image to delete. If you do not specify a registry, the default registry
- // is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository that contains the image to delete.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchDeleteImageInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchDeleteImageInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchDeleteImageInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchDeleteImageInput"}
- if s.ImageIds == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageIds"))
- }
- if s.ImageIds != nil && len(s.ImageIds) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageIds != nil {
- for i, v := range s.ImageIds {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *BatchDeleteImageInput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageInput {
- s.ImageIds = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *BatchDeleteImageInput) SetRegistryId(v string) *BatchDeleteImageInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *BatchDeleteImageInput) SetRepositoryName(v string) *BatchDeleteImageInput {
- s.RepositoryName = &v
- return s
-}
-
-type BatchDeleteImageOutput struct {
- _ struct{} `type:"structure"`
-
- // Any failures associated with the call.
- Failures []*ImageFailure `locationName:"failures" type:"list"`
-
- // The image IDs of the deleted images.
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchDeleteImageOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchDeleteImageOutput) GoString() string {
- return s.String()
-}
-
-// SetFailures sets the Failures field's value.
-func (s *BatchDeleteImageOutput) SetFailures(v []*ImageFailure) *BatchDeleteImageOutput {
- s.Failures = v
- return s
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *BatchDeleteImageOutput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageOutput {
- s.ImageIds = v
- return s
-}
-
-type BatchGetImageInput struct {
- _ struct{} `type:"structure"`
-
- // The accepted media types for the request.
- //
- // Valid values: application/vnd.docker.distribution.manifest.v1+json | application/vnd.docker.distribution.manifest.v2+json
- // | application/vnd.oci.image.manifest.v1+json
- AcceptedMediaTypes []*string `locationName:"acceptedMediaTypes" min:"1" type:"list"`
-
- // A list of image ID references that correspond to images to describe. The
- // format of the imageIds reference is imageTag=tag or imageDigest=digest.
- //
- // ImageIds is a required field
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the images to describe. If you do not specify a registry, the default registry
- // is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository that contains the images to describe.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetImageInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetImageInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchGetImageInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchGetImageInput"}
- if s.AcceptedMediaTypes != nil && len(s.AcceptedMediaTypes) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AcceptedMediaTypes", 1))
- }
- if s.ImageIds == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageIds"))
- }
- if s.ImageIds != nil && len(s.ImageIds) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageIds != nil {
- for i, v := range s.ImageIds {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAcceptedMediaTypes sets the AcceptedMediaTypes field's value.
-func (s *BatchGetImageInput) SetAcceptedMediaTypes(v []*string) *BatchGetImageInput {
- s.AcceptedMediaTypes = v
- return s
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *BatchGetImageInput) SetImageIds(v []*ImageIdentifier) *BatchGetImageInput {
- s.ImageIds = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *BatchGetImageInput) SetRegistryId(v string) *BatchGetImageInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *BatchGetImageInput) SetRepositoryName(v string) *BatchGetImageInput {
- s.RepositoryName = &v
- return s
-}
-
-type BatchGetImageOutput struct {
- _ struct{} `type:"structure"`
-
- // Any failures associated with the call.
- Failures []*ImageFailure `locationName:"failures" type:"list"`
-
- // A list of image objects corresponding to the image references in the request.
- Images []*Image `locationName:"images" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetImageOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetImageOutput) GoString() string {
- return s.String()
-}
-
-// SetFailures sets the Failures field's value.
-func (s *BatchGetImageOutput) SetFailures(v []*ImageFailure) *BatchGetImageOutput {
- s.Failures = v
- return s
-}
-
-// SetImages sets the Images field's value.
-func (s *BatchGetImageOutput) SetImages(v []*Image) *BatchGetImageOutput {
- s.Images = v
- return s
-}
-
-type BatchGetRepositoryScanningConfigurationInput struct {
- _ struct{} `type:"structure"`
-
- // One or more repository names to get the scanning configuration for.
- //
- // RepositoryNames is a required field
- RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetRepositoryScanningConfigurationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetRepositoryScanningConfigurationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchGetRepositoryScanningConfigurationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchGetRepositoryScanningConfigurationInput"}
- if s.RepositoryNames == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryNames"))
- }
- if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryNames", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRepositoryNames sets the RepositoryNames field's value.
-func (s *BatchGetRepositoryScanningConfigurationInput) SetRepositoryNames(v []*string) *BatchGetRepositoryScanningConfigurationInput {
- s.RepositoryNames = v
- return s
-}
-
-type BatchGetRepositoryScanningConfigurationOutput struct {
- _ struct{} `type:"structure"`
-
- // Any failures associated with the call.
- Failures []*RepositoryScanningConfigurationFailure `locationName:"failures" type:"list"`
-
- // The scanning configuration for the requested repositories.
- ScanningConfigurations []*RepositoryScanningConfiguration `locationName:"scanningConfigurations" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetRepositoryScanningConfigurationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetRepositoryScanningConfigurationOutput) GoString() string {
- return s.String()
-}
-
-// SetFailures sets the Failures field's value.
-func (s *BatchGetRepositoryScanningConfigurationOutput) SetFailures(v []*RepositoryScanningConfigurationFailure) *BatchGetRepositoryScanningConfigurationOutput {
- s.Failures = v
- return s
-}
-
-// SetScanningConfigurations sets the ScanningConfigurations field's value.
-func (s *BatchGetRepositoryScanningConfigurationOutput) SetScanningConfigurations(v []*RepositoryScanningConfiguration) *BatchGetRepositoryScanningConfigurationOutput {
- s.ScanningConfigurations = v
- return s
-}
-
-type CompleteLayerUploadInput struct {
- _ struct{} `type:"structure"`
-
- // The sha256 digest of the image layer.
- //
- // LayerDigests is a required field
- LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry to which
- // to upload layers. If you do not specify a registry, the default registry
- // is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to associate with the image layer.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-
- // The upload ID from a previous InitiateLayerUpload operation to associate
- // with the image layer.
- //
- // UploadId is a required field
- UploadId *string `locationName:"uploadId" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompleteLayerUploadInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompleteLayerUploadInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CompleteLayerUploadInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CompleteLayerUploadInput"}
- if s.LayerDigests == nil {
- invalidParams.Add(request.NewErrParamRequired("LayerDigests"))
- }
- if s.LayerDigests != nil && len(s.LayerDigests) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.UploadId == nil {
- invalidParams.Add(request.NewErrParamRequired("UploadId"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLayerDigests sets the LayerDigests field's value.
-func (s *CompleteLayerUploadInput) SetLayerDigests(v []*string) *CompleteLayerUploadInput {
- s.LayerDigests = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *CompleteLayerUploadInput) SetRegistryId(v string) *CompleteLayerUploadInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *CompleteLayerUploadInput) SetRepositoryName(v string) *CompleteLayerUploadInput {
- s.RepositoryName = &v
- return s
-}
-
-// SetUploadId sets the UploadId field's value.
-func (s *CompleteLayerUploadInput) SetUploadId(v string) *CompleteLayerUploadInput {
- s.UploadId = &v
- return s
-}
-
-type CompleteLayerUploadOutput struct {
- _ struct{} `type:"structure"`
-
- // The sha256 digest of the image layer.
- LayerDigest *string `locationName:"layerDigest" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The upload ID associated with the layer.
- UploadId *string `locationName:"uploadId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompleteLayerUploadOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompleteLayerUploadOutput) GoString() string {
- return s.String()
-}
-
-// SetLayerDigest sets the LayerDigest field's value.
-func (s *CompleteLayerUploadOutput) SetLayerDigest(v string) *CompleteLayerUploadOutput {
- s.LayerDigest = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *CompleteLayerUploadOutput) SetRegistryId(v string) *CompleteLayerUploadOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *CompleteLayerUploadOutput) SetRepositoryName(v string) *CompleteLayerUploadOutput {
- s.RepositoryName = &v
- return s
-}
-
-// SetUploadId sets the UploadId field's value.
-func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOutput {
- s.UploadId = &v
- return s
-}
-
-type CreatePullThroughCacheRuleInput struct {
- _ struct{} `type:"structure"`
-
- // The repository name prefix to use when caching images from the source registry.
- //
- // EcrRepositoryPrefix is a required field
- EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry to create
- // the pull through cache rule for. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The registry URL of the upstream public registry to use as the source for
- // the pull through cache rule.
- //
- // UpstreamRegistryUrl is a required field
- UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePullThroughCacheRuleInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePullThroughCacheRuleInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreatePullThroughCacheRuleInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreatePullThroughCacheRuleInput"}
- if s.EcrRepositoryPrefix == nil {
- invalidParams.Add(request.NewErrParamRequired("EcrRepositoryPrefix"))
- }
- if s.EcrRepositoryPrefix != nil && len(*s.EcrRepositoryPrefix) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("EcrRepositoryPrefix", 2))
- }
- if s.UpstreamRegistryUrl == nil {
- invalidParams.Add(request.NewErrParamRequired("UpstreamRegistryUrl"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value.
-func (s *CreatePullThroughCacheRuleInput) SetEcrRepositoryPrefix(v string) *CreatePullThroughCacheRuleInput {
- s.EcrRepositoryPrefix = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *CreatePullThroughCacheRuleInput) SetRegistryId(v string) *CreatePullThroughCacheRuleInput {
- s.RegistryId = &v
- return s
-}
-
-// SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value.
-func (s *CreatePullThroughCacheRuleInput) SetUpstreamRegistryUrl(v string) *CreatePullThroughCacheRuleInput {
- s.UpstreamRegistryUrl = &v
- return s
-}
-
-type CreatePullThroughCacheRuleOutput struct {
- _ struct{} `type:"structure"`
-
- // The date and time, in JavaScript date format, when the pull through cache
- // rule was created.
- CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
-
- // The Amazon ECR repository prefix associated with the pull through cache rule.
- EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The upstream registry URL associated with the pull through cache rule.
- UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePullThroughCacheRuleOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreatePullThroughCacheRuleOutput) GoString() string {
- return s.String()
-}
-
-// SetCreatedAt sets the CreatedAt field's value.
-func (s *CreatePullThroughCacheRuleOutput) SetCreatedAt(v time.Time) *CreatePullThroughCacheRuleOutput {
- s.CreatedAt = &v
- return s
-}
-
-// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value.
-func (s *CreatePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *CreatePullThroughCacheRuleOutput {
- s.EcrRepositoryPrefix = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *CreatePullThroughCacheRuleOutput) SetRegistryId(v string) *CreatePullThroughCacheRuleOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value.
-func (s *CreatePullThroughCacheRuleOutput) SetUpstreamRegistryUrl(v string) *CreatePullThroughCacheRuleOutput {
- s.UpstreamRegistryUrl = &v
- return s
-}
-
-type CreateRepositoryInput struct {
- _ struct{} `type:"structure"`
-
- // The encryption configuration for the repository. This determines how the
- // contents of your repository are encrypted at rest.
- EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"`
-
- // The image scanning configuration for the repository. This determines whether
- // images are scanned for known vulnerabilities after being pushed to the repository.
- ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"`
-
- // The tag mutability setting for the repository. If this parameter is omitted,
- // the default setting of MUTABLE will be used which will allow image tags to
- // be overwritten. If IMMUTABLE is specified, all image tags within the repository
- // will be immutable which will prevent them from being overwritten.
- ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"`
-
- // The Amazon Web Services account ID associated with the registry to create
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name to use for the repository. The repository name may be specified
- // on its own (such as nginx-web-app) or it can be prepended with a namespace
- // to group the repository into a category (such as project-a/nginx-web-app).
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-
- // The metadata that you apply to the repository to help you categorize and
- // organize them. Each tag consists of a key and an optional value, both of
- // which you define. Tag keys can have a maximum character length of 128 characters,
- // and tag values can have a maximum length of 256 characters.
- Tags []*Tag `locationName:"tags" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateRepositoryInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateRepositoryInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateRepositoryInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.EncryptionConfiguration != nil {
- if err := s.EncryptionConfiguration.Validate(); err != nil {
- invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
-func (s *CreateRepositoryInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateRepositoryInput {
- s.EncryptionConfiguration = v
- return s
-}
-
-// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
-func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput {
- s.ImageScanningConfiguration = v
- return s
-}
-
-// SetImageTagMutability sets the ImageTagMutability field's value.
-func (s *CreateRepositoryInput) SetImageTagMutability(v string) *CreateRepositoryInput {
- s.ImageTagMutability = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *CreateRepositoryInput) SetRegistryId(v string) *CreateRepositoryInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput {
- s.RepositoryName = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *CreateRepositoryInput) SetTags(v []*Tag) *CreateRepositoryInput {
- s.Tags = v
- return s
-}
-
-type CreateRepositoryOutput struct {
- _ struct{} `type:"structure"`
-
- // The repository that was created.
- Repository *Repository `locationName:"repository" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateRepositoryOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateRepositoryOutput) GoString() string {
- return s.String()
-}
-
-// SetRepository sets the Repository field's value.
-func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryOutput {
- s.Repository = v
- return s
-}
-
-// The CVSS score for a finding.
-type CvssScore struct {
- _ struct{} `type:"structure"`
-
- // The base CVSS score used for the finding.
- BaseScore *float64 `locationName:"baseScore" type:"double"`
-
- // The vector string of the CVSS score.
- ScoringVector *string `locationName:"scoringVector" type:"string"`
-
- // The source of the CVSS score.
- Source *string `locationName:"source" type:"string"`
-
- // The version of CVSS used for the score.
- Version *string `locationName:"version" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScore) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScore) GoString() string {
- return s.String()
-}
-
-// SetBaseScore sets the BaseScore field's value.
-func (s *CvssScore) SetBaseScore(v float64) *CvssScore {
- s.BaseScore = &v
- return s
-}
-
-// SetScoringVector sets the ScoringVector field's value.
-func (s *CvssScore) SetScoringVector(v string) *CvssScore {
- s.ScoringVector = &v
- return s
-}
-
-// SetSource sets the Source field's value.
-func (s *CvssScore) SetSource(v string) *CvssScore {
- s.Source = &v
- return s
-}
-
-// SetVersion sets the Version field's value.
-func (s *CvssScore) SetVersion(v string) *CvssScore {
- s.Version = &v
- return s
-}
-
-// Details on adjustments Amazon Inspector made to the CVSS score for a finding.
-type CvssScoreAdjustment struct {
- _ struct{} `type:"structure"`
-
- // The metric used to adjust the CVSS score.
- Metric *string `locationName:"metric" type:"string"`
-
- // The reason the CVSS score has been adjusted.
- Reason *string `locationName:"reason" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScoreAdjustment) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScoreAdjustment) GoString() string {
- return s.String()
-}
-
-// SetMetric sets the Metric field's value.
-func (s *CvssScoreAdjustment) SetMetric(v string) *CvssScoreAdjustment {
- s.Metric = &v
- return s
-}
-
-// SetReason sets the Reason field's value.
-func (s *CvssScoreAdjustment) SetReason(v string) *CvssScoreAdjustment {
- s.Reason = &v
- return s
-}
-
-// Information about the CVSS score.
-type CvssScoreDetails struct {
- _ struct{} `type:"structure"`
-
- // An object that contains details about adjustments Amazon Inspector made to
- // the CVSS score.
- Adjustments []*CvssScoreAdjustment `locationName:"adjustments" type:"list"`
-
- // The CVSS score.
- Score *float64 `locationName:"score" type:"double"`
-
- // The source for the CVSS score.
- ScoreSource *string `locationName:"scoreSource" type:"string"`
-
- // The vector for the CVSS score.
- ScoringVector *string `locationName:"scoringVector" type:"string"`
-
- // The CVSS version used in scoring.
- Version *string `locationName:"version" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScoreDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CvssScoreDetails) GoString() string {
- return s.String()
-}
-
-// SetAdjustments sets the Adjustments field's value.
-func (s *CvssScoreDetails) SetAdjustments(v []*CvssScoreAdjustment) *CvssScoreDetails {
- s.Adjustments = v
- return s
-}
-
-// SetScore sets the Score field's value.
-func (s *CvssScoreDetails) SetScore(v float64) *CvssScoreDetails {
- s.Score = &v
- return s
-}
-
-// SetScoreSource sets the ScoreSource field's value.
-func (s *CvssScoreDetails) SetScoreSource(v string) *CvssScoreDetails {
- s.ScoreSource = &v
- return s
-}
-
-// SetScoringVector sets the ScoringVector field's value.
-func (s *CvssScoreDetails) SetScoringVector(v string) *CvssScoreDetails {
- s.ScoringVector = &v
- return s
-}
-
-// SetVersion sets the Version field's value.
-func (s *CvssScoreDetails) SetVersion(v string) *CvssScoreDetails {
- s.Version = &v
- return s
-}
-
-type DeleteLifecyclePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteLifecyclePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteLifecyclePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteLifecyclePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput {
- s.RepositoryName = &v
- return s
-}
-
-type DeleteLifecyclePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The time stamp of the last time that the lifecycle policy was run.
- LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"`
-
- // The JSON lifecycle policy text.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteLifecyclePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteLifecyclePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetLastEvaluatedAt sets the LastEvaluatedAt field's value.
-func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput {
- s.LastEvaluatedAt = &v
- return s
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-type DeletePullThroughCacheRuleInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon ECR repository prefix associated with the pull through cache rule
- // to delete.
- //
- // EcrRepositoryPrefix is a required field
- EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the pull through cache rule. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeletePullThroughCacheRuleInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeletePullThroughCacheRuleInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeletePullThroughCacheRuleInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeletePullThroughCacheRuleInput"}
- if s.EcrRepositoryPrefix == nil {
- invalidParams.Add(request.NewErrParamRequired("EcrRepositoryPrefix"))
- }
- if s.EcrRepositoryPrefix != nil && len(*s.EcrRepositoryPrefix) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("EcrRepositoryPrefix", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value.
-func (s *DeletePullThroughCacheRuleInput) SetEcrRepositoryPrefix(v string) *DeletePullThroughCacheRuleInput {
- s.EcrRepositoryPrefix = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeletePullThroughCacheRuleInput) SetRegistryId(v string) *DeletePullThroughCacheRuleInput {
- s.RegistryId = &v
- return s
-}
-
-type DeletePullThroughCacheRuleOutput struct {
- _ struct{} `type:"structure"`
-
- // The timestamp associated with the pull through cache rule.
- CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
-
- // The Amazon ECR repository prefix associated with the request.
- EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The upstream registry URL associated with the pull through cache rule.
- UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeletePullThroughCacheRuleOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeletePullThroughCacheRuleOutput) GoString() string {
- return s.String()
-}
-
-// SetCreatedAt sets the CreatedAt field's value.
-func (s *DeletePullThroughCacheRuleOutput) SetCreatedAt(v time.Time) *DeletePullThroughCacheRuleOutput {
- s.CreatedAt = &v
- return s
-}
-
-// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value.
-func (s *DeletePullThroughCacheRuleOutput) SetEcrRepositoryPrefix(v string) *DeletePullThroughCacheRuleOutput {
- s.EcrRepositoryPrefix = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeletePullThroughCacheRuleOutput) SetRegistryId(v string) *DeletePullThroughCacheRuleOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value.
-func (s *DeletePullThroughCacheRuleOutput) SetUpstreamRegistryUrl(v string) *DeletePullThroughCacheRuleOutput {
- s.UpstreamRegistryUrl = &v
- return s
-}
-
-type DeleteRegistryPolicyInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRegistryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRegistryPolicyInput) GoString() string {
- return s.String()
-}
-
-type DeleteRegistryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The contents of the registry permissions policy that was deleted.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRegistryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRegistryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *DeleteRegistryPolicyOutput) SetPolicyText(v string) *DeleteRegistryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteRegistryPolicyOutput) SetRegistryId(v string) *DeleteRegistryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-type DeleteRepositoryInput struct {
- _ struct{} `type:"structure"`
-
- // If a repository contains images, forces the deletion.
- Force *bool `locationName:"force" type:"boolean"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository to delete. If you do not specify a registry, the default registry
- // is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to delete.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteRepositoryInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetForce sets the Force field's value.
-func (s *DeleteRepositoryInput) SetForce(v bool) *DeleteRepositoryInput {
- s.Force = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteRepositoryInput) SetRegistryId(v string) *DeleteRepositoryInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInput {
- s.RepositoryName = &v
- return s
-}
-
-type DeleteRepositoryOutput struct {
- _ struct{} `type:"structure"`
-
- // The repository that was deleted.
- Repository *Repository `locationName:"repository" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryOutput) GoString() string {
- return s.String()
-}
-
-// SetRepository sets the Repository field's value.
-func (s *DeleteRepositoryOutput) SetRepository(v *Repository) *DeleteRepositoryOutput {
- s.Repository = v
- return s
-}
-
-type DeleteRepositoryPolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository policy to delete. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository that is associated with the repository policy
- // to delete.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryPolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteRepositoryPolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteRepositoryPolicyInput) SetRegistryId(v string) *DeleteRepositoryPolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteRepositoryPolicyInput {
- s.RepositoryName = &v
- return s
-}
-
-type DeleteRepositoryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy that was deleted from the repository.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRepositoryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *DeleteRepositoryPolicyOutput) SetPolicyText(v string) *DeleteRepositoryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DeleteRepositoryPolicyOutput) SetRegistryId(v string) *DeleteRepositoryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteRepositoryPolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-type DescribeImageReplicationStatusInput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- //
- // ImageId is a required field
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry. If you do
- // not specify a registry, the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository that the image is in.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageReplicationStatusInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageReplicationStatusInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeImageReplicationStatusInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeImageReplicationStatusInput"}
- if s.ImageId == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageId"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageId != nil {
- if err := s.ImageId.Validate(); err != nil {
- invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *DescribeImageReplicationStatusInput) SetImageId(v *ImageIdentifier) *DescribeImageReplicationStatusInput {
- s.ImageId = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeImageReplicationStatusInput) SetRegistryId(v string) *DescribeImageReplicationStatusInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DescribeImageReplicationStatusInput) SetRepositoryName(v string) *DescribeImageReplicationStatusInput {
- s.RepositoryName = &v
- return s
-}
-
-type DescribeImageReplicationStatusOutput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
-
- // The replication status details for the images in the specified repository.
- ReplicationStatuses []*ImageReplicationStatus `locationName:"replicationStatuses" type:"list"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageReplicationStatusOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageReplicationStatusOutput) GoString() string {
- return s.String()
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *DescribeImageReplicationStatusOutput) SetImageId(v *ImageIdentifier) *DescribeImageReplicationStatusOutput {
- s.ImageId = v
- return s
-}
-
-// SetReplicationStatuses sets the ReplicationStatuses field's value.
-func (s *DescribeImageReplicationStatusOutput) SetReplicationStatuses(v []*ImageReplicationStatus) *DescribeImageReplicationStatusOutput {
- s.ReplicationStatuses = v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DescribeImageReplicationStatusOutput) SetRepositoryName(v string) *DescribeImageReplicationStatusOutput {
- s.RepositoryName = &v
- return s
-}
-
-type DescribeImageScanFindingsInput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- //
- // ImageId is a required field
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"`
-
- // The maximum number of image scan results returned by DescribeImageScanFindings
- // in paginated output. When this parameter is used, DescribeImageScanFindings
- // only returns maxResults results in a single page along with a nextToken response
- // element. The remaining results of the initial request can be seen by sending
- // another DescribeImageScanFindings request with the returned nextToken value.
- // This value can be between 1 and 1000. If this parameter is not used, then
- // DescribeImageScanFindings returns up to 100 results and a nextToken value,
- // if applicable.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated DescribeImageScanFindings
- // request where maxResults was used and the results exceeded the value of that
- // parameter. Pagination continues from the end of the previous results that
- // returned the nextToken value. This value is null when there are no more results
- // to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to describe the image scan findings. If you do not
- // specify a registry, the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository for the image for which to describe the scan findings.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageScanFindingsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageScanFindingsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeImageScanFindingsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeImageScanFindingsInput"}
- if s.ImageId == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageId"))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageId != nil {
- if err := s.ImageId.Validate(); err != nil {
- invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *DescribeImageScanFindingsInput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsInput {
- s.ImageId = v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *DescribeImageScanFindingsInput) SetMaxResults(v int64) *DescribeImageScanFindingsInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeImageScanFindingsInput) SetNextToken(v string) *DescribeImageScanFindingsInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeImageScanFindingsInput) SetRegistryId(v string) *DescribeImageScanFindingsInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DescribeImageScanFindingsInput) SetRepositoryName(v string) *DescribeImageScanFindingsInput {
- s.RepositoryName = &v
- return s
-}
-
-type DescribeImageScanFindingsOutput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
-
- // The information contained in the image scan findings.
- ImageScanFindings *ImageScanFindings `locationName:"imageScanFindings" type:"structure"`
-
- // The current state of the scan.
- ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"`
-
- // The nextToken value to include in a future DescribeImageScanFindings request.
- // When the results of a DescribeImageScanFindings request exceed maxResults,
- // this value can be used to retrieve the next page of results. This value is
- // null when there are no more results to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageScanFindingsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImageScanFindingsOutput) GoString() string {
- return s.String()
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *DescribeImageScanFindingsOutput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsOutput {
- s.ImageId = v
- return s
-}
-
-// SetImageScanFindings sets the ImageScanFindings field's value.
-func (s *DescribeImageScanFindingsOutput) SetImageScanFindings(v *ImageScanFindings) *DescribeImageScanFindingsOutput {
- s.ImageScanFindings = v
- return s
-}
-
-// SetImageScanStatus sets the ImageScanStatus field's value.
-func (s *DescribeImageScanFindingsOutput) SetImageScanStatus(v *ImageScanStatus) *DescribeImageScanFindingsOutput {
- s.ImageScanStatus = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeImageScanFindingsOutput) SetNextToken(v string) *DescribeImageScanFindingsOutput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeImageScanFindingsOutput) SetRegistryId(v string) *DescribeImageScanFindingsOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DescribeImageScanFindingsOutput) SetRepositoryName(v string) *DescribeImageScanFindingsOutput {
- s.RepositoryName = &v
- return s
-}
-
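// A minimal sketch (not part of the generated code above): building a
// DescribeImageScanFindingsInput with the setters shown and checking it with
// Validate before sending. The *ECR client value ("svc"), its
// DescribeImageScanFindings method, and ImageIdentifier's SetImageTag setter
// are assumed to be defined elsewhere in this package.
func describeScanFindingsSketch(svc *ECR) (*ImageScanFindings, error) {
	input := &DescribeImageScanFindingsInput{}
	input.SetImageId((&ImageIdentifier{}).SetImageTag("latest"))
	input.SetRepositoryName("my-repo") // must be at least 2 characters
	input.SetMaxResults(50)            // must be between 1 and 1000

	// Validate reports missing required fields (ImageId, RepositoryName) and
	// min-length/min-value violations before any request is made.
	if err := input.Validate(); err != nil {
		return nil, err
	}
	out, err := svc.DescribeImageScanFindings(input)
	if err != nil {
		return nil, err
	}
	return out.ImageScanFindings, nil
}
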
-// An object representing a filter on a DescribeImages operation.
-type DescribeImagesFilter struct {
- _ struct{} `type:"structure"`
-
- // The tag status with which to filter your DescribeImages results. You can
- // filter results based on whether they are TAGGED or UNTAGGED.
- TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesFilter) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesFilter) GoString() string {
- return s.String()
-}
-
-// SetTagStatus sets the TagStatus field's value.
-func (s *DescribeImagesFilter) SetTagStatus(v string) *DescribeImagesFilter {
- s.TagStatus = &v
- return s
-}
-
-type DescribeImagesInput struct {
- _ struct{} `type:"structure"`
-
- // The filter key and value with which to filter your DescribeImages results.
- Filter *DescribeImagesFilter `locationName:"filter" type:"structure"`
-
- // The list of image IDs for the requested repository.
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
-
- // The maximum number of repository results returned by DescribeImages in paginated
- // output. When this parameter is used, DescribeImages only returns maxResults
- // results in a single page along with a nextToken response element. The remaining
- // results of the initial request can be seen by sending another DescribeImages
- // request with the returned nextToken value. This value can be between 1 and
- // 1000. If this parameter is not used, then DescribeImages returns up to 100
- // results and a nextToken value, if applicable. This option cannot be used
- // when you specify images with imageIds.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated DescribeImages request
- // where maxResults was used and the results exceeded the value of that parameter.
- // Pagination continues from the end of the previous results that returned the
- // nextToken value. This value is null when there are no more results to return.
- // This option cannot be used when you specify images with imageIds.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to describe images. If you do not specify a registry,
- // the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository that contains the images to describe.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeImagesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeImagesInput"}
- if s.ImageIds != nil && len(s.ImageIds) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageIds != nil {
- for i, v := range s.ImageIds {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *DescribeImagesInput) SetFilter(v *DescribeImagesFilter) *DescribeImagesInput {
- s.Filter = v
- return s
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *DescribeImagesInput) SetImageIds(v []*ImageIdentifier) *DescribeImagesInput {
- s.ImageIds = v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *DescribeImagesInput) SetMaxResults(v int64) *DescribeImagesInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeImagesInput) SetNextToken(v string) *DescribeImagesInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeImagesInput) SetRegistryId(v string) *DescribeImagesInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *DescribeImagesInput) SetRepositoryName(v string) *DescribeImagesInput {
- s.RepositoryName = &v
- return s
-}
-
-type DescribeImagesOutput struct {
- _ struct{} `type:"structure"`
-
- // A list of ImageDetail objects that contain data about the image.
- ImageDetails []*ImageDetail `locationName:"imageDetails" type:"list"`
-
- // The nextToken value to include in a future DescribeImages request. When the
- // results of a DescribeImages request exceed maxResults, this value can be
- // used to retrieve the next page of results. This value is null when there
- // are no more results to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImagesOutput) GoString() string {
- return s.String()
-}
-
-// SetImageDetails sets the ImageDetails field's value.
-func (s *DescribeImagesOutput) SetImageDetails(v []*ImageDetail) *DescribeImagesOutput {
- s.ImageDetails = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput {
- s.NextToken = &v
- return s
-}
-
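// A minimal sketch (not part of the generated code above): paging through
// DescribeImages results by feeding each response's nextToken back into the
// next request, as the field documentation above describes. The *ECR client
// ("svc") and its DescribeImages method are assumed to be defined elsewhere
// in this package; "TAGGED" is one of the TagStatus enum values mentioned in
// the DescribeImagesFilter documentation.
func listTaggedImageDetailsSketch(svc *ECR, repository string) ([]*ImageDetail, error) {
	input := &DescribeImagesInput{}
	input.SetRepositoryName(repository)
	input.SetFilter((&DescribeImagesFilter{}).SetTagStatus("TAGGED"))
	input.SetMaxResults(100)

	var details []*ImageDetail
	for {
		out, err := svc.DescribeImages(input)
		if err != nil {
			return nil, err
		}
		details = append(details, out.ImageDetails...)
		if out.NextToken == nil {
			break // a nil nextToken means there are no more results
		}
		input.SetNextToken(*out.NextToken)
	}
	return details, nil
}
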
-type DescribePullThroughCacheRulesInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon ECR repository prefixes associated with the pull through cache
- // rules to return. If no repository prefix value is specified, all pull through
- // cache rules are returned.
- EcrRepositoryPrefixes []*string `locationName:"ecrRepositoryPrefixes" min:"1" type:"list"`
-
- // The maximum number of pull through cache rules returned by DescribePullThroughCacheRulesRequest
- // in paginated output. When this parameter is used, DescribePullThroughCacheRulesRequest
- // only returns maxResults results in a single page along with a nextToken response
- // element. The remaining results of the initial request can be seen by sending
- // another DescribePullThroughCacheRulesRequest request with the returned nextToken
- // value. This value can be between 1 and 1000. If this parameter is not used,
- // then DescribePullThroughCacheRulesRequest returns up to 100 results and a
- // nextToken value, if applicable.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated DescribePullThroughCacheRulesRequest
- // request where maxResults was used and the results exceeded the value of that
- // parameter. Pagination continues from the end of the previous results that
- // returned the nextToken value. This value is null when there are no more results
- // to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry to return
- // the pull through cache rules for. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribePullThroughCacheRulesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribePullThroughCacheRulesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribePullThroughCacheRulesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribePullThroughCacheRulesInput"}
- if s.EcrRepositoryPrefixes != nil && len(s.EcrRepositoryPrefixes) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("EcrRepositoryPrefixes", 1))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEcrRepositoryPrefixes sets the EcrRepositoryPrefixes field's value.
-func (s *DescribePullThroughCacheRulesInput) SetEcrRepositoryPrefixes(v []*string) *DescribePullThroughCacheRulesInput {
- s.EcrRepositoryPrefixes = v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *DescribePullThroughCacheRulesInput) SetMaxResults(v int64) *DescribePullThroughCacheRulesInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribePullThroughCacheRulesInput) SetNextToken(v string) *DescribePullThroughCacheRulesInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribePullThroughCacheRulesInput) SetRegistryId(v string) *DescribePullThroughCacheRulesInput {
- s.RegistryId = &v
- return s
-}
-
-type DescribePullThroughCacheRulesOutput struct {
- _ struct{} `type:"structure"`
-
- // The nextToken value to include in a future DescribePullThroughCacheRulesRequest
- // request. When the results of a DescribePullThroughCacheRulesRequest request
- // exceed maxResults, this value can be used to retrieve the next page of results.
- // This value is null when there are no more results to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The details of the pull through cache rules.
- PullThroughCacheRules []*PullThroughCacheRule `locationName:"pullThroughCacheRules" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribePullThroughCacheRulesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribePullThroughCacheRulesOutput) GoString() string {
- return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribePullThroughCacheRulesOutput) SetNextToken(v string) *DescribePullThroughCacheRulesOutput {
- s.NextToken = &v
- return s
-}
-
-// SetPullThroughCacheRules sets the PullThroughCacheRules field's value.
-func (s *DescribePullThroughCacheRulesOutput) SetPullThroughCacheRules(v []*PullThroughCacheRule) *DescribePullThroughCacheRulesOutput {
- s.PullThroughCacheRules = v
- return s
-}
-
-type DescribeRegistryInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRegistryInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRegistryInput) GoString() string {
- return s.String()
-}
-
-type DescribeRegistryOutput struct {
- _ struct{} `type:"structure"`
-
- // The ID of the registry.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The replication configuration for the registry.
- ReplicationConfiguration *ReplicationConfiguration `locationName:"replicationConfiguration" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRegistryOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRegistryOutput) GoString() string {
- return s.String()
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeRegistryOutput) SetRegistryId(v string) *DescribeRegistryOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
-func (s *DescribeRegistryOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *DescribeRegistryOutput {
- s.ReplicationConfiguration = v
- return s
-}
-
-type DescribeRepositoriesInput struct {
- _ struct{} `type:"structure"`
-
- // The maximum number of repository results returned by DescribeRepositories
- // in paginated output. When this parameter is used, DescribeRepositories only
- // returns maxResults results in a single page along with a nextToken response
- // element. The remaining results of the initial request can be seen by sending
- // another DescribeRepositories request with the returned nextToken value. This
- // value can be between 1 and 1000. If this parameter is not used, then DescribeRepositories
- // returns up to 100 results and a nextToken value, if applicable. This option
- // cannot be used when you specify repositories with repositoryNames.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated DescribeRepositories
- // request where maxResults was used and the results exceeded the value of that
- // parameter. Pagination continues from the end of the previous results that
- // returned the nextToken value. This value is null when there are no more results
- // to return. This option cannot be used when you specify repositories with
- // repositoryNames.
- //
- // This token should be treated as an opaque identifier that is only used to
- // retrieve the next items in a list and not for other programmatic purposes.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repositories to be described. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // A list of repositories to describe. If this parameter is omitted, then all
- // repositories in a registry are described.
- RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRepositoriesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRepositoriesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeRepositoriesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeRepositoriesInput"}
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryNames", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *DescribeRepositoriesInput) SetMaxResults(v int64) *DescribeRepositoriesInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeRepositoriesInput) SetNextToken(v string) *DescribeRepositoriesInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *DescribeRepositoriesInput) SetRegistryId(v string) *DescribeRepositoriesInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryNames sets the RepositoryNames field's value.
-func (s *DescribeRepositoriesInput) SetRepositoryNames(v []*string) *DescribeRepositoriesInput {
- s.RepositoryNames = v
- return s
-}
-
-type DescribeRepositoriesOutput struct {
- _ struct{} `type:"structure"`
-
- // The nextToken value to include in a future DescribeRepositories request.
- // When the results of a DescribeRepositories request exceed maxResults, this
- // value can be used to retrieve the next page of results. This value is null
- // when there are no more results to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // A list of repository objects corresponding to valid repositories.
- Repositories []*Repository `locationName:"repositories" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRepositoriesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeRepositoriesOutput) GoString() string {
- return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeRepositoriesOutput) SetNextToken(v string) *DescribeRepositoriesOutput {
- s.NextToken = &v
- return s
-}
-
-// SetRepositories sets the Repositories field's value.
-func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeRepositoriesOutput {
- s.Repositories = v
- return s
-}
-
-// The specified layer upload does not contain any layer parts.
-type EmptyUploadException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EmptyUploadException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EmptyUploadException) GoString() string {
- return s.String()
-}
-
-func newErrorEmptyUploadException(v protocol.ResponseMetadata) error {
- return &EmptyUploadException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *EmptyUploadException) Code() string {
- return "EmptyUploadException"
-}
-
-// Message returns the exception's message.
-func (s *EmptyUploadException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *EmptyUploadException) OrigErr() error {
- return nil
-}
-
-func (s *EmptyUploadException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *EmptyUploadException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *EmptyUploadException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
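// A minimal sketch (not part of the generated code above): because
// EmptyUploadException implements the error interface via the methods above,
// a caller can detect it with errors.As from the standard library (assumed to
// be imported by that caller). Whether the match succeeds also depends on how
// the SDK's request layer wraps the returned error.
func isEmptyUploadSketch(err error) bool {
	var emptyUpload *EmptyUploadException
	if errors.As(err, &emptyUpload) {
		// emptyUpload.Message(), StatusCode(), and RequestID() are available here.
		return true
	}
	return false
}
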
-// The encryption configuration for the repository. This determines how the
-// contents of your repository are encrypted at rest.
-//
-// By default, when no encryption configuration is set or the AES256 encryption
-// type is used, Amazon ECR uses server-side encryption with Amazon S3-managed
-// encryption keys, which encrypt your data at rest using an AES-256 encryption
-// algorithm. This does not require any action on your part.
-//
-// For more control over the encryption of the contents of your repository,
-// you can use server-side encryption with a Key Management Service (KMS) key
-// to encrypt your images. For more information, see Amazon ECR encryption at
-// rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html)
-// in the Amazon Elastic Container Registry User Guide.
-type EncryptionConfiguration struct {
- _ struct{} `type:"structure"`
-
- // The encryption type to use.
- //
- // If you use the KMS encryption type, the contents of the repository will be
- // encrypted using server-side encryption with a Key Management Service (KMS)
- // key. When you use KMS to encrypt your data, you can either use the default
- // Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS
- // key, which you have already created. For more information, see Protecting
- // data using server-side encryption with a KMS key stored in Key Management
- // Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html)
- // in the Amazon Simple Storage Service Console Developer Guide.
- //
- // If you use the AES256 encryption type, Amazon ECR uses server-side encryption
- // with Amazon S3-managed encryption keys, which encrypt the images in the
- // repository using an AES-256 encryption algorithm. For more information, see
- // Protecting data using server-side encryption with Amazon S3-managed encryption
- // keys (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
- // in the Amazon Simple Storage Service Console Developer Guide.
- //
- // EncryptionType is a required field
- EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"`
-
- // If you use the KMS encryption type, specify the KMS key to use for encryption.
- // The alias, key ID, or full ARN of the KMS key can be specified. The key must
- // exist in the same Region as the repository. If no key is specified, the default
- // Amazon Web Services managed KMS key for Amazon ECR will be used.
- KmsKey *string `locationName:"kmsKey" min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EncryptionConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EncryptionConfiguration) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *EncryptionConfiguration) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"}
- if s.EncryptionType == nil {
- invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
- }
- if s.KmsKey != nil && len(*s.KmsKey) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEncryptionType sets the EncryptionType field's value.
-func (s *EncryptionConfiguration) SetEncryptionType(v string) *EncryptionConfiguration {
- s.EncryptionType = &v
- return s
-}
-
-// SetKmsKey sets the KmsKey field's value.
-func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration {
- s.KmsKey = &v
- return s
-}
-
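// A minimal sketch (not part of the generated code above): an
// EncryptionConfiguration that encrypts a repository with a customer managed
// KMS key, following the field documentation above. The "KMS" literal is one
// of the EncryptionType enum values; the key ARN is a placeholder.
func kmsEncryptionConfigSketch() (*EncryptionConfiguration, error) {
	cfg := &EncryptionConfiguration{}
	cfg.SetEncryptionType("KMS")
	cfg.SetKmsKey("arn:aws:kms:us-east-1:111122223333:key/example-key-id") // placeholder ARN
	// Validate catches a missing EncryptionType and an empty KmsKey value.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
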
-// The details of an enhanced image scan. This is returned when enhanced scanning
-// is enabled for your private registry.
-type EnhancedImageScanFinding struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the image.
- AwsAccountId *string `locationName:"awsAccountId" type:"string"`
-
- // The description of the finding.
- Description *string `locationName:"description" type:"string"`
-
- // The Amazon Resource Name (ARN) of the finding.
- FindingArn *string `locationName:"findingArn" type:"string"`
-
- // The date and time that the finding was first observed.
- FirstObservedAt *time.Time `locationName:"firstObservedAt" type:"timestamp"`
-
- // The date and time that the finding was last observed.
- LastObservedAt *time.Time `locationName:"lastObservedAt" type:"timestamp"`
-
- // An object that contains the details of a package vulnerability finding.
- PackageVulnerabilityDetails *PackageVulnerabilityDetails `locationName:"packageVulnerabilityDetails" type:"structure"`
-
- // An object that contains the details about how to remediate a finding.
- Remediation *Remediation `locationName:"remediation" type:"structure"`
-
- // Contains information on the resources involved in a finding.
- Resources []*Resource `locationName:"resources" type:"list"`
-
- // The Amazon Inspector score given to the finding.
- Score *float64 `locationName:"score" type:"double"`
-
- // An object that contains details of the Amazon Inspector score.
- ScoreDetails *ScoreDetails `locationName:"scoreDetails" type:"structure"`
-
- // The severity of the finding.
- Severity *string `locationName:"severity" type:"string"`
-
- // The status of the finding.
- Status *string `locationName:"status" type:"string"`
-
- // The title of the finding.
- Title *string `locationName:"title" type:"string"`
-
- // The type of the finding.
- Type *string `locationName:"type" type:"string"`
-
- // The date and time that the finding was last updated.
- UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnhancedImageScanFinding) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnhancedImageScanFinding) GoString() string {
- return s.String()
-}
-
-// SetAwsAccountId sets the AwsAccountId field's value.
-func (s *EnhancedImageScanFinding) SetAwsAccountId(v string) *EnhancedImageScanFinding {
- s.AwsAccountId = &v
- return s
-}
-
-// SetDescription sets the Description field's value.
-func (s *EnhancedImageScanFinding) SetDescription(v string) *EnhancedImageScanFinding {
- s.Description = &v
- return s
-}
-
-// SetFindingArn sets the FindingArn field's value.
-func (s *EnhancedImageScanFinding) SetFindingArn(v string) *EnhancedImageScanFinding {
- s.FindingArn = &v
- return s
-}
-
-// SetFirstObservedAt sets the FirstObservedAt field's value.
-func (s *EnhancedImageScanFinding) SetFirstObservedAt(v time.Time) *EnhancedImageScanFinding {
- s.FirstObservedAt = &v
- return s
-}
-
-// SetLastObservedAt sets the LastObservedAt field's value.
-func (s *EnhancedImageScanFinding) SetLastObservedAt(v time.Time) *EnhancedImageScanFinding {
- s.LastObservedAt = &v
- return s
-}
-
-// SetPackageVulnerabilityDetails sets the PackageVulnerabilityDetails field's value.
-func (s *EnhancedImageScanFinding) SetPackageVulnerabilityDetails(v *PackageVulnerabilityDetails) *EnhancedImageScanFinding {
- s.PackageVulnerabilityDetails = v
- return s
-}
-
-// SetRemediation sets the Remediation field's value.
-func (s *EnhancedImageScanFinding) SetRemediation(v *Remediation) *EnhancedImageScanFinding {
- s.Remediation = v
- return s
-}
-
-// SetResources sets the Resources field's value.
-func (s *EnhancedImageScanFinding) SetResources(v []*Resource) *EnhancedImageScanFinding {
- s.Resources = v
- return s
-}
-
-// SetScore sets the Score field's value.
-func (s *EnhancedImageScanFinding) SetScore(v float64) *EnhancedImageScanFinding {
- s.Score = &v
- return s
-}
-
-// SetScoreDetails sets the ScoreDetails field's value.
-func (s *EnhancedImageScanFinding) SetScoreDetails(v *ScoreDetails) *EnhancedImageScanFinding {
- s.ScoreDetails = v
- return s
-}
-
-// SetSeverity sets the Severity field's value.
-func (s *EnhancedImageScanFinding) SetSeverity(v string) *EnhancedImageScanFinding {
- s.Severity = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *EnhancedImageScanFinding) SetStatus(v string) *EnhancedImageScanFinding {
- s.Status = &v
- return s
-}
-
-// SetTitle sets the Title field's value.
-func (s *EnhancedImageScanFinding) SetTitle(v string) *EnhancedImageScanFinding {
- s.Title = &v
- return s
-}
-
-// SetType sets the Type field's value.
-func (s *EnhancedImageScanFinding) SetType(v string) *EnhancedImageScanFinding {
- s.Type = &v
- return s
-}
-
-// SetUpdatedAt sets the UpdatedAt field's value.
-func (s *EnhancedImageScanFinding) SetUpdatedAt(v time.Time) *EnhancedImageScanFinding {
- s.UpdatedAt = &v
- return s
-}
-
-type GetAuthorizationTokenInput struct {
- _ struct{} `type:"structure"`
-
- // A list of Amazon Web Services account IDs that are associated with the registries
- // for which to get AuthorizationData objects. If you do not specify a registry,
- // the default registry is assumed.
- //
- // Deprecated: This field is deprecated. The returned authorization token can be used to access any Amazon ECR registry that the IAM principal has access to; specifying a registry ID doesn't change the permissions scope of the authorization token.
- RegistryIds []*string `locationName:"registryIds" min:"1" deprecated:"true" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAuthorizationTokenInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAuthorizationTokenInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetAuthorizationTokenInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetAuthorizationTokenInput"}
- if s.RegistryIds != nil && len(s.RegistryIds) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RegistryIds", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryIds sets the RegistryIds field's value.
-func (s *GetAuthorizationTokenInput) SetRegistryIds(v []*string) *GetAuthorizationTokenInput {
- s.RegistryIds = v
- return s
-}
-
-type GetAuthorizationTokenOutput struct {
- _ struct{} `type:"structure"`
-
- // A list of authorization token data objects that correspond to the registryIds
- // values in the request.
- AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAuthorizationTokenOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAuthorizationTokenOutput) GoString() string {
- return s.String()
-}
-
-// SetAuthorizationData sets the AuthorizationData field's value.
-func (s *GetAuthorizationTokenOutput) SetAuthorizationData(v []*AuthorizationData) *GetAuthorizationTokenOutput {
- s.AuthorizationData = v
- return s
-}
-
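// A minimal sketch (not part of the generated code above): each
// AuthorizationData entry carries a base64-encoded "user:password" pair that
// can be used to authenticate docker or OCI clients against the registry.
// AuthorizationData is defined elsewhere in this file; the standard library
// "encoding/base64" and "strings" packages are assumed to be imported, and
// nil checks are omitted for brevity.
func decodeAuthorizationTokenSketch(data *AuthorizationData) (user, password string, err error) {
	raw, err := base64.StdEncoding.DecodeString(*data.AuthorizationToken)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(raw), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unexpected authorization token format")
	}
	return parts[0], parts[1], nil
}
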
-type GetDownloadUrlForLayerInput struct {
- _ struct{} `type:"structure"`
-
- // The digest of the image layer to download.
- //
- // LayerDigest is a required field
- LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the image layer to download. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository that is associated with the image layer to download.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetDownloadUrlForLayerInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetDownloadUrlForLayerInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetDownloadUrlForLayerInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetDownloadUrlForLayerInput"}
- if s.LayerDigest == nil {
- invalidParams.Add(request.NewErrParamRequired("LayerDigest"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLayerDigest sets the LayerDigest field's value.
-func (s *GetDownloadUrlForLayerInput) SetLayerDigest(v string) *GetDownloadUrlForLayerInput {
- s.LayerDigest = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetDownloadUrlForLayerInput) SetRegistryId(v string) *GetDownloadUrlForLayerInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetDownloadUrlForLayerInput) SetRepositoryName(v string) *GetDownloadUrlForLayerInput {
- s.RepositoryName = &v
- return s
-}
-
-type GetDownloadUrlForLayerOutput struct {
- _ struct{} `type:"structure"`
-
- // The pre-signed Amazon S3 download URL for the requested layer.
- DownloadUrl *string `locationName:"downloadUrl" type:"string"`
-
- // The digest of the image layer to download.
- LayerDigest *string `locationName:"layerDigest" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetDownloadUrlForLayerOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetDownloadUrlForLayerOutput) GoString() string {
- return s.String()
-}
-
-// SetDownloadUrl sets the DownloadUrl field's value.
-func (s *GetDownloadUrlForLayerOutput) SetDownloadUrl(v string) *GetDownloadUrlForLayerOutput {
- s.DownloadUrl = &v
- return s
-}
-
-// SetLayerDigest sets the LayerDigest field's value.
-func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlForLayerOutput {
- s.LayerDigest = &v
- return s
-}
-
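// A minimal sketch (not part of the generated code above): fetching the
// pre-signed S3 URL for a layer so it can be handed to an HTTP client. The
// *ECR client ("svc") and its GetDownloadUrlForLayer method are assumed to be
// defined elsewhere in this package; the digest argument is expected to be a
// "sha256:..." layer digest.
func layerDownloadURLSketch(svc *ECR, repository, digest string) (string, error) {
	input := &GetDownloadUrlForLayerInput{}
	input.SetRepositoryName(repository)
	input.SetLayerDigest(digest)
	if err := input.Validate(); err != nil {
		return "", err
	}
	out, err := svc.GetDownloadUrlForLayer(input)
	if err != nil {
		return "", err
	}
	// The returned URL is pre-signed and time-limited, so use it promptly.
	return *out.DownloadUrl, nil
}
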
-type GetLifecyclePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetLifecyclePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput {
- s.RepositoryName = &v
- return s
-}
-
-type GetLifecyclePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The time stamp of the last time that the lifecycle policy was run.
- LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"`
-
- // The JSON lifecycle policy text.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetLastEvaluatedAt sets the LastEvaluatedAt field's value.
-func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput {
- s.LastEvaluatedAt = &v
- return s
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-type GetLifecyclePolicyPreviewInput struct {
- _ struct{} `type:"structure"`
-
- // An optional parameter that filters results based on image tag status and
- // all tags, if tagged.
- Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"`
-
- // The list of imageIDs to be included.
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
-
- // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest
- // in paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
- // only returns maxResults results in a single page along with a nextToken response
- // element. The remaining results of the initial request can be seen by sending
- // another GetLifecyclePolicyPreviewRequest request with the returned nextToken
- // value. This value can be between 1 and 1000. If this parameter is not used,
- // then GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken
- // value, if applicable. This option cannot be used when you specify images
- // with imageIds.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest
- // request where maxResults was used and the results exceeded the value of that
- // parameter. Pagination continues from the end of the previous results that
- // returned the nextToken value. This value is null when there are no more results
- // to return. This option cannot be used when you specify images with imageIds.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyPreviewInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyPreviewInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetLifecyclePolicyPreviewInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"}
- if s.ImageIds != nil && len(s.ImageIds) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageIds != nil {
- for i, v := range s.ImageIds {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ImageIds", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput {
- s.Filter = v
- return s
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput {
- s.ImageIds = v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput {
- s.RepositoryName = &v
- return s
-}
-
-type GetLifecyclePolicyPreviewOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON lifecycle policy text.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The nextToken value to include in a future GetLifecyclePolicyPreview request.
- // When the results of a GetLifecyclePolicyPreview request exceed maxResults,
- // this value can be used to retrieve the next page of results. This value is
- // null when there are no more results to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The results of the lifecycle policy preview request.
- PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The status of the lifecycle policy preview request.
- Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"`
-
- // The list of images that is returned as a result of the action.
- Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyPreviewOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetLifecyclePolicyPreviewOutput) GoString() string {
- return s.String()
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput {
- s.NextToken = &v
- return s
-}
-
-// SetPreviewResults sets the PreviewResults field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput {
- s.PreviewResults = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput {
- s.RepositoryName = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput {
- s.Status = &v
- return s
-}
-
-// SetSummary sets the Summary field's value.
-func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput {
- s.Summary = v
- return s
-}
-
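// A minimal sketch (not part of the generated code above): polling
// GetLifecyclePolicyPreview until the preview leaves the in-progress state.
// The *ECR client ("svc") and its GetLifecyclePolicyPreview method are assumed
// to be defined elsewhere in this package, and the "IN_PROGRESS" literal is
// assumed to match the LifecyclePolicyPreviewStatus enum referenced by the
// Status field above.
func waitForLifecyclePreviewSketch(svc *ECR, repository string) (*GetLifecyclePolicyPreviewOutput, error) {
	input := &GetLifecyclePolicyPreviewInput{}
	input.SetRepositoryName(repository)
	for {
		out, err := svc.GetLifecyclePolicyPreview(input)
		if err != nil {
			return nil, err
		}
		if out.Status == nil || *out.Status != "IN_PROGRESS" {
			return out, nil
		}
		time.Sleep(5 * time.Second) // "time" is already imported by this file for timestamp fields
	}
}
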
-type GetRegistryPolicyInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryPolicyInput) GoString() string {
- return s.String()
-}
-
-type GetRegistryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON text of the permissions policy for a registry.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The ID of the registry.
- RegistryId *string `locationName:"registryId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *GetRegistryPolicyOutput) SetPolicyText(v string) *GetRegistryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetRegistryPolicyOutput) SetRegistryId(v string) *GetRegistryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-type GetRegistryScanningConfigurationInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryScanningConfigurationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryScanningConfigurationInput) GoString() string {
- return s.String()
-}
-
-type GetRegistryScanningConfigurationOutput struct {
- _ struct{} `type:"structure"`
-
- // The ID of the registry.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The scanning configuration for the registry.
- ScanningConfiguration *RegistryScanningConfiguration `locationName:"scanningConfiguration" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryScanningConfigurationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRegistryScanningConfigurationOutput) GoString() string {
- return s.String()
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetRegistryScanningConfigurationOutput) SetRegistryId(v string) *GetRegistryScanningConfigurationOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetScanningConfiguration sets the ScanningConfiguration field's value.
-func (s *GetRegistryScanningConfigurationOutput) SetScanningConfiguration(v *RegistryScanningConfiguration) *GetRegistryScanningConfigurationOutput {
- s.ScanningConfiguration = v
- return s
-}
-
-type GetRepositoryPolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository with the policy to retrieve.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRepositoryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRepositoryPolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetRepositoryPolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput {
- s.RepositoryName = &v
- return s
-}
-
-type GetRepositoryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy text associated with the repository.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRepositoryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRepositoryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *GetRepositoryPolicyOutput) SetPolicyText(v string) *GetRepositoryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *GetRepositoryPolicyOutput) SetRegistryId(v string) *GetRepositoryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *GetRepositoryPolicyOutput) SetRepositoryName(v string) *GetRepositoryPolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-// An object representing an Amazon ECR image.
-type Image struct {
- _ struct{} `type:"structure"`
-
- // An object containing the image tag and image digest associated with an image.
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
-
- // The image manifest associated with the image.
- ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"`
-
- // The manifest media type of the image.
- ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry containing
- // the image.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository associated with the image.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Image) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Image) GoString() string {
- return s.String()
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *Image) SetImageId(v *ImageIdentifier) *Image {
- s.ImageId = v
- return s
-}
-
-// SetImageManifest sets the ImageManifest field's value.
-func (s *Image) SetImageManifest(v string) *Image {
- s.ImageManifest = &v
- return s
-}
-
-// SetImageManifestMediaType sets the ImageManifestMediaType field's value.
-func (s *Image) SetImageManifestMediaType(v string) *Image {
- s.ImageManifestMediaType = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *Image) SetRegistryId(v string) *Image {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *Image) SetRepositoryName(v string) *Image {
- s.RepositoryName = &v
- return s
-}
-
-// The specified image has already been pushed, and there were no changes to
-// the manifest or image tag after the last push.
-type ImageAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorImageAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &ImageAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImageAlreadyExistsException) Code() string {
- return "ImageAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *ImageAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImageAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *ImageAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *ImageAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImageAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An object that describes an image returned by a DescribeImages operation.
-type ImageDetail struct {
- _ struct{} `type:"structure"`
-
- // The artifact media type of the image.
- ArtifactMediaType *string `locationName:"artifactMediaType" type:"string"`
-
- // The sha256 digest of the image manifest.
- ImageDigest *string `locationName:"imageDigest" type:"string"`
-
- // The media type of the image manifest.
- ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
-
- // The date and time, expressed in standard JavaScript date format, at which
- // the current image was pushed to the repository.
- ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"`
-
- // A summary of the last completed image scan.
- ImageScanFindingsSummary *ImageScanFindingsSummary `locationName:"imageScanFindingsSummary" type:"structure"`
-
- // The current state of the scan.
- ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"`
-
- // The size, in bytes, of the image in the repository.
- //
- // If the image is a manifest list, this will be the max size of all manifests
- // in the list.
- //
- // Beginning with Docker version 1.9, the Docker client compresses image layers
- // before pushing them to a V2 Docker registry. The output of the docker images
- // command shows the uncompressed image size, so it may return a larger image
- // size than the image sizes returned by DescribeImages.
- ImageSizeInBytes *int64 `locationName:"imageSizeInBytes" type:"long"`
-
- // The list of tags associated with this image.
- ImageTags []*string `locationName:"imageTags" type:"list"`
-
- // The date and time, expressed in standard JavaScript date format, when Amazon
- // ECR recorded the last image pull.
- //
- // Amazon ECR refreshes the last image pull timestamp at least once every 24
- // hours. For example, if you pull an image once a day then the lastRecordedPullTime
- // timestamp will indicate the exact time that the image was last pulled. However,
- // if you pull an image once an hour, because Amazon ECR refreshes the lastRecordedPullTime
- // timestamp at least once every 24 hours, the result may not be the exact time
- // that the image was last pulled.
- LastRecordedPullTime *time.Time `locationName:"lastRecordedPullTime" type:"timestamp"`
-
- // The Amazon Web Services account ID associated with the registry to which
- // this image belongs.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to which this image belongs.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageDetail) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageDetail) GoString() string {
- return s.String()
-}
-
-// SetArtifactMediaType sets the ArtifactMediaType field's value.
-func (s *ImageDetail) SetArtifactMediaType(v string) *ImageDetail {
- s.ArtifactMediaType = &v
- return s
-}
-
-// SetImageDigest sets the ImageDigest field's value.
-func (s *ImageDetail) SetImageDigest(v string) *ImageDetail {
- s.ImageDigest = &v
- return s
-}
-
-// SetImageManifestMediaType sets the ImageManifestMediaType field's value.
-func (s *ImageDetail) SetImageManifestMediaType(v string) *ImageDetail {
- s.ImageManifestMediaType = &v
- return s
-}
-
-// SetImagePushedAt sets the ImagePushedAt field's value.
-func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail {
- s.ImagePushedAt = &v
- return s
-}
-
-// SetImageScanFindingsSummary sets the ImageScanFindingsSummary field's value.
-func (s *ImageDetail) SetImageScanFindingsSummary(v *ImageScanFindingsSummary) *ImageDetail {
- s.ImageScanFindingsSummary = v
- return s
-}
-
-// SetImageScanStatus sets the ImageScanStatus field's value.
-func (s *ImageDetail) SetImageScanStatus(v *ImageScanStatus) *ImageDetail {
- s.ImageScanStatus = v
- return s
-}
-
-// SetImageSizeInBytes sets the ImageSizeInBytes field's value.
-func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail {
- s.ImageSizeInBytes = &v
- return s
-}
-
-// SetImageTags sets the ImageTags field's value.
-func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail {
- s.ImageTags = v
- return s
-}
-
-// SetLastRecordedPullTime sets the LastRecordedPullTime field's value.
-func (s *ImageDetail) SetLastRecordedPullTime(v time.Time) *ImageDetail {
- s.LastRecordedPullTime = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *ImageDetail) SetRegistryId(v string) *ImageDetail {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail {
- s.RepositoryName = &v
- return s
-}
-
-// The specified image digest does not match the digest that Amazon ECR calculated
-// for the image.
-type ImageDigestDoesNotMatchException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
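- // The error message associated with the exception.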
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageDigestDoesNotMatchException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageDigestDoesNotMatchException) GoString() string {
- return s.String()
-}
-
-func newErrorImageDigestDoesNotMatchException(v protocol.ResponseMetadata) error {
- return &ImageDigestDoesNotMatchException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImageDigestDoesNotMatchException) Code() string {
- return "ImageDigestDoesNotMatchException"
-}
-
-// Message returns the exception's message.
-func (s *ImageDigestDoesNotMatchException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImageDigestDoesNotMatchException) OrigErr() error {
- return nil
-}
-
-func (s *ImageDigestDoesNotMatchException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *ImageDigestDoesNotMatchException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImageDigestDoesNotMatchException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An object representing an Amazon ECR image failure.
-type ImageFailure struct {
- _ struct{} `type:"structure"`
-
- // The code associated with the failure.
- FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"`
-
- // The reason for the failure.
- FailureReason *string `locationName:"failureReason" type:"string"`
-
- // The image ID associated with the failure.
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageFailure) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageFailure) GoString() string {
- return s.String()
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *ImageFailure) SetFailureCode(v string) *ImageFailure {
- s.FailureCode = &v
- return s
-}
-
-// SetFailureReason sets the FailureReason field's value.
-func (s *ImageFailure) SetFailureReason(v string) *ImageFailure {
- s.FailureReason = &v
- return s
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure {
- s.ImageId = v
- return s
-}
-
-// An object with identifying information for an image in an Amazon ECR repository.
-type ImageIdentifier struct {
- _ struct{} `type:"structure"`
-
- // The sha256 digest of the image manifest.
- ImageDigest *string `locationName:"imageDigest" type:"string"`
-
- // The tag used for the image.
- ImageTag *string `locationName:"imageTag" min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageIdentifier) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageIdentifier) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ImageIdentifier) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ImageIdentifier"}
- if s.ImageTag != nil && len(*s.ImageTag) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageDigest sets the ImageDigest field's value.
-func (s *ImageIdentifier) SetImageDigest(v string) *ImageIdentifier {
- s.ImageDigest = &v
- return s
-}
-
-// SetImageTag sets the ImageTag field's value.
-func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier {
- s.ImageTag = &v
- return s
-}
-
-// The image requested does not exist in the specified repository.
-type ImageNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
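- // The error message associated with the exception.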
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorImageNotFoundException(v protocol.ResponseMetadata) error {
- return &ImageNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImageNotFoundException) Code() string {
- return "ImageNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ImageNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImageNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ImageNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *ImageNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImageNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The status of the replication process for an image.
-type ImageReplicationStatus struct {
- _ struct{} `type:"structure"`
-
- // The failure code for a replication that has failed.
- FailureCode *string `locationName:"failureCode" type:"string"`
-
- // The destination Region for the image replication.
- Region *string `locationName:"region" min:"2" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry to which
- // the image belongs.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The image replication status.
- Status *string `locationName:"status" type:"string" enum:"ReplicationStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageReplicationStatus) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageReplicationStatus) GoString() string {
- return s.String()
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *ImageReplicationStatus) SetFailureCode(v string) *ImageReplicationStatus {
- s.FailureCode = &v
- return s
-}
-
-// SetRegion sets the Region field's value.
-func (s *ImageReplicationStatus) SetRegion(v string) *ImageReplicationStatus {
- s.Region = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *ImageReplicationStatus) SetRegistryId(v string) *ImageReplicationStatus {
- s.RegistryId = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *ImageReplicationStatus) SetStatus(v string) *ImageReplicationStatus {
- s.Status = &v
- return s
-}
-
-// Contains information about an image scan finding.
-type ImageScanFinding struct {
- _ struct{} `type:"structure"`
-
- // A collection of attributes of the host from which the finding is generated.
- Attributes []*Attribute `locationName:"attributes" type:"list"`
-
- // The description of the finding.
- Description *string `locationName:"description" type:"string"`
-
- // The name associated with the finding, usually a CVE number.
- Name *string `locationName:"name" type:"string"`
-
- // The finding severity.
- Severity *string `locationName:"severity" type:"string" enum:"FindingSeverity"`
-
- // A link containing additional details about the security vulnerability.
- Uri *string `locationName:"uri" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFinding) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFinding) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *ImageScanFinding) SetAttributes(v []*Attribute) *ImageScanFinding {
- s.Attributes = v
- return s
-}
-
-// SetDescription sets the Description field's value.
-func (s *ImageScanFinding) SetDescription(v string) *ImageScanFinding {
- s.Description = &v
- return s
-}
-
-// SetName sets the Name field's value.
-func (s *ImageScanFinding) SetName(v string) *ImageScanFinding {
- s.Name = &v
- return s
-}
-
-// SetSeverity sets the Severity field's value.
-func (s *ImageScanFinding) SetSeverity(v string) *ImageScanFinding {
- s.Severity = &v
- return s
-}
-
-// SetUri sets the Uri field's value.
-func (s *ImageScanFinding) SetUri(v string) *ImageScanFinding {
- s.Uri = &v
- return s
-}
-
-// The details of an image scan.
-type ImageScanFindings struct {
- _ struct{} `type:"structure"`
-
- // Details about the enhanced scan findings from Amazon Inspector.
- EnhancedFindings []*EnhancedImageScanFinding `locationName:"enhancedFindings" type:"list"`
-
- // The image vulnerability counts, sorted by severity.
- FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"`
-
- // The findings from the image scan.
- Findings []*ImageScanFinding `locationName:"findings" type:"list"`
-
- // The time of the last completed image scan.
- ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"`
-
- // The time when the vulnerability data was last scanned.
- VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFindings) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFindings) GoString() string {
- return s.String()
-}
-
-// SetEnhancedFindings sets the EnhancedFindings field's value.
-func (s *ImageScanFindings) SetEnhancedFindings(v []*EnhancedImageScanFinding) *ImageScanFindings {
- s.EnhancedFindings = v
- return s
-}
-
-// SetFindingSeverityCounts sets the FindingSeverityCounts field's value.
-func (s *ImageScanFindings) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindings {
- s.FindingSeverityCounts = v
- return s
-}
-
-// SetFindings sets the Findings field's value.
-func (s *ImageScanFindings) SetFindings(v []*ImageScanFinding) *ImageScanFindings {
- s.Findings = v
- return s
-}
-
-// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value.
-func (s *ImageScanFindings) SetImageScanCompletedAt(v time.Time) *ImageScanFindings {
- s.ImageScanCompletedAt = &v
- return s
-}
-
-// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value.
-func (s *ImageScanFindings) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindings {
- s.VulnerabilitySourceUpdatedAt = &v
- return s
-}
-
-// A summary of the last completed image scan.
-type ImageScanFindingsSummary struct {
- _ struct{} `type:"structure"`
-
- // The image vulnerability counts, sorted by severity.
- FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"`
-
- // The time of the last completed image scan.
- ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"`
-
- // The time when the vulnerability data was last scanned.
- VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFindingsSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanFindingsSummary) GoString() string {
- return s.String()
-}
-
-// SetFindingSeverityCounts sets the FindingSeverityCounts field's value.
-func (s *ImageScanFindingsSummary) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindingsSummary {
- s.FindingSeverityCounts = v
- return s
-}
-
-// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value.
-func (s *ImageScanFindingsSummary) SetImageScanCompletedAt(v time.Time) *ImageScanFindingsSummary {
- s.ImageScanCompletedAt = &v
- return s
-}
-
-// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value.
-func (s *ImageScanFindingsSummary) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindingsSummary {
- s.VulnerabilitySourceUpdatedAt = &v
- return s
-}
-
-// The current status of an image scan.
-type ImageScanStatus struct {
- _ struct{} `type:"structure"`
-
- // The description of the image scan status.
- Description *string `locationName:"description" type:"string"`
-
- // The current state of an image scan.
- Status *string `locationName:"status" type:"string" enum:"ScanStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanStatus) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanStatus) GoString() string {
- return s.String()
-}
-
-// SetDescription sets the Description field's value.
-func (s *ImageScanStatus) SetDescription(v string) *ImageScanStatus {
- s.Description = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *ImageScanStatus) SetStatus(v string) *ImageScanStatus {
- s.Status = &v
- return s
-}
-
-// The image scanning configuration for a repository.
-type ImageScanningConfiguration struct {
- _ struct{} `type:"structure"`
-
- // The setting that determines whether images are scanned after being pushed
- // to a repository. If set to true, images will be scanned after being pushed.
- // If this parameter is not specified, it will default to false and images will
- // not be scanned unless a scan is manually started with the API_StartImageScan
- // (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html)
- // API.
- ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanningConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageScanningConfiguration) GoString() string {
- return s.String()
-}
-
-// SetScanOnPush sets the ScanOnPush field's value.
-func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfiguration {
- s.ScanOnPush = &v
- return s
-}
-
-// The specified image is tagged with a tag that already exists. The repository
-// is configured for tag immutability.
-type ImageTagAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
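- // The error message associated with the exception.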
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageTagAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImageTagAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorImageTagAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &ImageTagAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImageTagAlreadyExistsException) Code() string {
- return "ImageTagAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *ImageTagAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImageTagAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *ImageTagAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *ImageTagAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImageTagAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type InitiateLayerUploadInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID associated with the registry to which
- // you intend to upload layers. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to which you intend to upload layers.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InitiateLayerUploadInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InitiateLayerUploadInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InitiateLayerUploadInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "InitiateLayerUploadInput"}
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *InitiateLayerUploadInput) SetRegistryId(v string) *InitiateLayerUploadInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *InitiateLayerUploadInput) SetRepositoryName(v string) *InitiateLayerUploadInput {
- s.RepositoryName = &v
- return s
-}
-
-type InitiateLayerUploadOutput struct {
- _ struct{} `type:"structure"`
-
- // The size, in bytes, that Amazon ECR expects future layer part uploads to
- // be.
- PartSize *int64 `locationName:"partSize" type:"long"`
-
- // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart
- // and CompleteLayerUpload operations.
- UploadId *string `locationName:"uploadId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InitiateLayerUploadOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InitiateLayerUploadOutput) GoString() string {
- return s.String()
-}
-
-// SetPartSize sets the PartSize field's value.
-func (s *InitiateLayerUploadOutput) SetPartSize(v int64) *InitiateLayerUploadOutput {
- s.PartSize = &v
- return s
-}
-
-// SetUploadId sets the UploadId field's value.
-func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOutput {
- s.UploadId = &v
- return s
-}
-
-// The layer digest calculation performed by Amazon ECR upon receipt of the
-// image layer does not match the digest specified.
-type InvalidLayerException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidLayerException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidLayerException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidLayerException(v protocol.ResponseMetadata) error {
- return &InvalidLayerException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidLayerException) Code() string {
- return "InvalidLayerException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidLayerException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidLayerException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidLayerException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *InvalidLayerException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidLayerException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The layer part size is not valid, or the first byte specified is not consecutive
-// to the last byte of a previous layer part upload.
-type InvalidLayerPartException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The last valid byte received from the layer part upload that is associated
- // with the exception.
- LastValidByteReceived *int64 `locationName:"lastValidByteReceived" type:"long"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-
- // The registry ID associated with the exception.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the exception.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The upload ID associated with the exception.
- UploadId *string `locationName:"uploadId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidLayerPartException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidLayerPartException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidLayerPartException(v protocol.ResponseMetadata) error {
- return &InvalidLayerPartException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidLayerPartException) Code() string {
- return "InvalidLayerPartException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidLayerPartException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidLayerPartException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidLayerPartException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *InvalidLayerPartException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidLayerPartException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified parameter is invalid. Review the available parameters for the
-// API request.
-type InvalidParameterException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidParameterException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidParameterException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidParameterException(v protocol.ResponseMetadata) error {
- return &InvalidParameterException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidParameterException) Code() string {
- return "InvalidParameterException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidParameterException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidParameterException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidParameterException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *InvalidParameterException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidParameterException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
- // An invalid parameter has been specified. Tag keys can have a maximum length
- // of 128 characters, and tag values can have a maximum length of 256 characters.
-type InvalidTagParameterException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
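- // The error message associated with the exception.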
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidTagParameterException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidTagParameterException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidTagParameterException(v protocol.ResponseMetadata) error {
- return &InvalidTagParameterException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidTagParameterException) Code() string {
- return "InvalidTagParameterException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidTagParameterException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidTagParameterException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidTagParameterException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *InvalidTagParameterException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidTagParameterException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The operation failed due to a KMS exception.
-type KmsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error code returned by KMS.
- KmsError *string `locationName:"kmsError" type:"string"`
-
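- // The error message associated with the exception.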
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KmsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KmsException) GoString() string {
- return s.String()
-}
-
-func newErrorKmsException(v protocol.ResponseMetadata) error {
- return &KmsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *KmsException) Code() string {
- return "KmsException"
-}
-
-// Message returns the exception's message.
-func (s *KmsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *KmsException) OrigErr() error {
- return nil
-}
-
-func (s *KmsException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *KmsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *KmsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An object representing an Amazon ECR image layer.
-type Layer struct {
- _ struct{} `type:"structure"`
-
- // The availability status of the image layer.
- LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"`
-
- // The sha256 digest of the image layer.
- LayerDigest *string `locationName:"layerDigest" type:"string"`
-
- // The size, in bytes, of the image layer.
- LayerSize *int64 `locationName:"layerSize" type:"long"`
-
- // The media type of the layer, such as application/vnd.docker.image.rootfs.diff.tar.gzip
- // or application/vnd.oci.image.layer.v1.tar+gzip.
- MediaType *string `locationName:"mediaType" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Layer) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Layer) GoString() string {
- return s.String()
-}
-
-// SetLayerAvailability sets the LayerAvailability field's value.
-func (s *Layer) SetLayerAvailability(v string) *Layer {
- s.LayerAvailability = &v
- return s
-}
-
-// SetLayerDigest sets the LayerDigest field's value.
-func (s *Layer) SetLayerDigest(v string) *Layer {
- s.LayerDigest = &v
- return s
-}
-
-// SetLayerSize sets the LayerSize field's value.
-func (s *Layer) SetLayerSize(v int64) *Layer {
- s.LayerSize = &v
- return s
-}
-
-// SetMediaType sets the MediaType field's value.
-func (s *Layer) SetMediaType(v string) *Layer {
- s.MediaType = &v
- return s
-}
-
-// The image layer already exists in the associated repository.
-type LayerAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorLayerAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &LayerAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LayerAlreadyExistsException) Code() string {
- return "LayerAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *LayerAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LayerAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *LayerAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
- // StatusCode returns the HTTP status code for the request's response error.
-func (s *LayerAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *LayerAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An object representing an Amazon ECR image layer failure.
-type LayerFailure struct {
- _ struct{} `type:"structure"`
-
- // The failure code associated with the failure.
- FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"`
-
- // The reason for the failure.
- FailureReason *string `locationName:"failureReason" type:"string"`
-
- // The layer digest associated with the failure.
- LayerDigest *string `locationName:"layerDigest" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerFailure) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerFailure) GoString() string {
- return s.String()
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *LayerFailure) SetFailureCode(v string) *LayerFailure {
- s.FailureCode = &v
- return s
-}
-
-// SetFailureReason sets the FailureReason field's value.
-func (s *LayerFailure) SetFailureReason(v string) *LayerFailure {
- s.FailureReason = &v
- return s
-}
-
-// SetLayerDigest sets the LayerDigest field's value.
-func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure {
- s.LayerDigest = &v
- return s
-}
-
-// The specified layer is not available because it is not associated with an
-// image. Unassociated image layers may be cleaned up at any time.
-type LayerInaccessibleException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerInaccessibleException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerInaccessibleException) GoString() string {
- return s.String()
-}
-
-func newErrorLayerInaccessibleException(v protocol.ResponseMetadata) error {
- return &LayerInaccessibleException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LayerInaccessibleException) Code() string {
- return "LayerInaccessibleException"
-}
-
-// Message returns the exception's message.
-func (s *LayerInaccessibleException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LayerInaccessibleException) OrigErr() error {
- return nil
-}
-
-func (s *LayerInaccessibleException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LayerInaccessibleException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LayerInaccessibleException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Layer parts must be at least 5 MiB in size.
-type LayerPartTooSmallException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerPartTooSmallException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayerPartTooSmallException) GoString() string {
- return s.String()
-}
-
-func newErrorLayerPartTooSmallException(v protocol.ResponseMetadata) error {
- return &LayerPartTooSmallException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LayerPartTooSmallException) Code() string {
- return "LayerPartTooSmallException"
-}
-
-// Message returns the exception's message.
-func (s *LayerPartTooSmallException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LayerPartTooSmallException) OrigErr() error {
- return nil
-}
-
-func (s *LayerPartTooSmallException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LayerPartTooSmallException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LayerPartTooSmallException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified layers could not be found, or the specified layer is not valid
-// for this repository.
-type LayersNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayersNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LayersNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorLayersNotFoundException(v protocol.ResponseMetadata) error {
- return &LayersNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LayersNotFoundException) Code() string {
- return "LayersNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *LayersNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LayersNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *LayersNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LayersNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LayersNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The lifecycle policy could not be found, and no policy is set to the repository.
-type LifecyclePolicyNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorLifecyclePolicyNotFoundException(v protocol.ResponseMetadata) error {
- return &LifecyclePolicyNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LifecyclePolicyNotFoundException) Code() string {
- return "LifecyclePolicyNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *LifecyclePolicyNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LifecyclePolicyNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *LifecyclePolicyNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LifecyclePolicyNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LifecyclePolicyNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The filter for the lifecycle policy preview.
-type LifecyclePolicyPreviewFilter struct {
- _ struct{} `type:"structure"`
-
- // The tag status of the image.
- TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewFilter) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewFilter) GoString() string {
- return s.String()
-}
-
-// SetTagStatus sets the TagStatus field's value.
-func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter {
- s.TagStatus = &v
- return s
-}
-
-// The previous lifecycle policy preview request has not completed. Wait and
-// try again.
-type LifecyclePolicyPreviewInProgressException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewInProgressException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewInProgressException) GoString() string {
- return s.String()
-}
-
-func newErrorLifecyclePolicyPreviewInProgressException(v protocol.ResponseMetadata) error {
- return &LifecyclePolicyPreviewInProgressException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LifecyclePolicyPreviewInProgressException) Code() string {
- return "LifecyclePolicyPreviewInProgressException"
-}
-
-// Message returns the exception's message.
-func (s *LifecyclePolicyPreviewInProgressException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LifecyclePolicyPreviewInProgressException) OrigErr() error {
- return nil
-}
-
-func (s *LifecyclePolicyPreviewInProgressException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LifecyclePolicyPreviewInProgressException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LifecyclePolicyPreviewInProgressException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// There is no dry run for this repository.
-type LifecyclePolicyPreviewNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorLifecyclePolicyPreviewNotFoundException(v protocol.ResponseMetadata) error {
- return &LifecyclePolicyPreviewNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LifecyclePolicyPreviewNotFoundException) Code() string {
- return "LifecyclePolicyPreviewNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *LifecyclePolicyPreviewNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LifecyclePolicyPreviewNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *LifecyclePolicyPreviewNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LifecyclePolicyPreviewNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LifecyclePolicyPreviewNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The result of the lifecycle policy preview.
-type LifecyclePolicyPreviewResult struct {
- _ struct{} `type:"structure"`
-
- // The type of action to be taken.
- Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"`
-
- // The priority of the applied rule.
- AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"`
-
- // The sha256 digest of the image manifest.
- ImageDigest *string `locationName:"imageDigest" type:"string"`
-
- // The date and time, expressed in standard JavaScript date format, at which
- // the current image was pushed to the repository.
- ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"`
-
- // The list of tags associated with this image.
- ImageTags []*string `locationName:"imageTags" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewResult) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewResult) GoString() string {
- return s.String()
-}
-
-// SetAction sets the Action field's value.
-func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult {
- s.Action = v
- return s
-}
-
-// SetAppliedRulePriority sets the AppliedRulePriority field's value.
-func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult {
- s.AppliedRulePriority = &v
- return s
-}
-
-// SetImageDigest sets the ImageDigest field's value.
-func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult {
- s.ImageDigest = &v
- return s
-}
-
-// SetImagePushedAt sets the ImagePushedAt field's value.
-func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult {
- s.ImagePushedAt = &v
- return s
-}
-
-// SetImageTags sets the ImageTags field's value.
-func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult {
- s.ImageTags = v
- return s
-}
-
-// The summary of the lifecycle policy preview request.
-type LifecyclePolicyPreviewSummary struct {
- _ struct{} `type:"structure"`
-
- // The number of expiring images.
- ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyPreviewSummary) GoString() string {
- return s.String()
-}
-
-// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value.
-func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary {
- s.ExpiringImageTotalCount = &v
- return s
-}
-
-// The type of action to be taken.
-type LifecyclePolicyRuleAction struct {
- _ struct{} `type:"structure"`
-
- // The type of action to be taken.
- Type *string `locationName:"type" type:"string" enum:"ImageActionType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyRuleAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LifecyclePolicyRuleAction) GoString() string {
- return s.String()
-}
-
-// SetType sets the Type field's value.
-func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction {
- s.Type = &v
- return s
-}
-
-// The operation did not succeed because it would have exceeded a service limit
-// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
-// in the Amazon Elastic Container Registry User Guide.
-type LimitExceededException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) GoString() string {
- return s.String()
-}
-
-func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
- return &LimitExceededException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LimitExceededException) Code() string {
- return "LimitExceededException"
-}
-
-// Message returns the exception's message.
-func (s *LimitExceededException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LimitExceededException) OrigErr() error {
- return nil
-}
-
-func (s *LimitExceededException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *LimitExceededException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *LimitExceededException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An object representing a filter on a ListImages operation.
-type ListImagesFilter struct {
- _ struct{} `type:"structure"`
-
- // The tag status with which to filter your ListImages results. You can filter
- // results based on whether they are TAGGED or UNTAGGED.
- TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesFilter) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesFilter) GoString() string {
- return s.String()
-}
-
-// SetTagStatus sets the TagStatus field's value.
-func (s *ListImagesFilter) SetTagStatus(v string) *ListImagesFilter {
- s.TagStatus = &v
- return s
-}
-
-type ListImagesInput struct {
- _ struct{} `type:"structure"`
-
- // The filter key and value with which to filter your ListImages results.
- Filter *ListImagesFilter `locationName:"filter" type:"structure"`
-
- // The maximum number of image results returned by ListImages in paginated output.
- // When this parameter is used, ListImages only returns maxResults results in
- // a single page along with a nextToken response element. The remaining results
- // of the initial request can be seen by sending another ListImages request
- // with the returned nextToken value. This value can be between 1 and 1000.
- // If this parameter is not used, then ListImages returns up to 100 results
- // and a nextToken value, if applicable.
- MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
-
- // The nextToken value returned from a previous paginated ListImages request
- // where maxResults was used and the results exceeded the value of that parameter.
- // Pagination continues from the end of the previous results that returned the
- // nextToken value. This value is null when there are no more results to return.
- //
- // This token should be treated as an opaque identifier that is only used to
- // retrieve the next items in a list and not for other programmatic purposes.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to list images. If you do not specify a registry,
- // the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository with image IDs to be listed.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListImagesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListImagesInput"}
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *ListImagesInput) SetFilter(v *ListImagesFilter) *ListImagesInput {
- s.Filter = v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListImagesInput) SetMaxResults(v int64) *ListImagesInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImagesInput) SetNextToken(v string) *ListImagesInput {
- s.NextToken = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *ListImagesInput) SetRegistryId(v string) *ListImagesInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *ListImagesInput) SetRepositoryName(v string) *ListImagesInput {
- s.RepositoryName = &v
- return s
-}
-
-type ListImagesOutput struct {
- _ struct{} `type:"structure"`
-
- // The list of image IDs for the requested repository.
- ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
-
- // The nextToken value to include in a future ListImages request. When the results
- // of a ListImages request exceed maxResults, this value can be used to retrieve
- // the next page of results. This value is null when there are no more results
- // to return.
- NextToken *string `locationName:"nextToken" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImagesOutput) GoString() string {
- return s.String()
-}
-
-// SetImageIds sets the ImageIds field's value.
-func (s *ListImagesOutput) SetImageIds(v []*ImageIdentifier) *ListImagesOutput {
- s.ImageIds = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput {
- s.NextToken = &v
- return s
-}
-
-type ListTagsForResourceInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) that identifies the resource for which to
- // list the tags. Currently, the only supported resource is an Amazon ECR repository.
- //
- // ResourceArn is a required field
- ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsForResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsForResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListTagsForResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-type ListTagsForResourceOutput struct {
- _ struct{} `type:"structure"`
-
- // The tags for the resource.
- Tags []*Tag `locationName:"tags" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsForResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsForResourceOutput) GoString() string {
- return s.String()
-}
-
-// SetTags sets the Tags field's value.
-func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput {
- s.Tags = v
- return s
-}
-
-// Information about a package vulnerability finding.
-type PackageVulnerabilityDetails struct {
- _ struct{} `type:"structure"`
-
- // An object that contains details about the CVSS score of a finding.
- Cvss []*CvssScore `locationName:"cvss" type:"list"`
-
- // One or more URLs that contain details about this vulnerability type.
- ReferenceUrls []*string `locationName:"referenceUrls" type:"list"`
-
- // One or more vulnerabilities related to the one identified in this finding.
- RelatedVulnerabilities []*string `locationName:"relatedVulnerabilities" type:"list"`
-
- // The source of the vulnerability information.
- Source *string `locationName:"source" type:"string"`
-
- // A URL to the source of the vulnerability information.
- SourceUrl *string `locationName:"sourceUrl" type:"string"`
-
- // The date and time that this vulnerability was first added to the vendor's
- // database.
- VendorCreatedAt *time.Time `locationName:"vendorCreatedAt" type:"timestamp"`
-
- // The severity the vendor has given to this vulnerability type.
- VendorSeverity *string `locationName:"vendorSeverity" type:"string"`
-
- // The date and time the vendor last updated this vulnerability in their database.
- VendorUpdatedAt *time.Time `locationName:"vendorUpdatedAt" type:"timestamp"`
-
- // The ID given to this vulnerability.
- VulnerabilityId *string `locationName:"vulnerabilityId" type:"string"`
-
- // The packages impacted by this vulnerability.
- VulnerablePackages []*VulnerablePackage `locationName:"vulnerablePackages" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PackageVulnerabilityDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PackageVulnerabilityDetails) GoString() string {
- return s.String()
-}
-
-// SetCvss sets the Cvss field's value.
-func (s *PackageVulnerabilityDetails) SetCvss(v []*CvssScore) *PackageVulnerabilityDetails {
- s.Cvss = v
- return s
-}
-
-// SetReferenceUrls sets the ReferenceUrls field's value.
-func (s *PackageVulnerabilityDetails) SetReferenceUrls(v []*string) *PackageVulnerabilityDetails {
- s.ReferenceUrls = v
- return s
-}
-
-// SetRelatedVulnerabilities sets the RelatedVulnerabilities field's value.
-func (s *PackageVulnerabilityDetails) SetRelatedVulnerabilities(v []*string) *PackageVulnerabilityDetails {
- s.RelatedVulnerabilities = v
- return s
-}
-
-// SetSource sets the Source field's value.
-func (s *PackageVulnerabilityDetails) SetSource(v string) *PackageVulnerabilityDetails {
- s.Source = &v
- return s
-}
-
-// SetSourceUrl sets the SourceUrl field's value.
-func (s *PackageVulnerabilityDetails) SetSourceUrl(v string) *PackageVulnerabilityDetails {
- s.SourceUrl = &v
- return s
-}
-
-// SetVendorCreatedAt sets the VendorCreatedAt field's value.
-func (s *PackageVulnerabilityDetails) SetVendorCreatedAt(v time.Time) *PackageVulnerabilityDetails {
- s.VendorCreatedAt = &v
- return s
-}
-
-// SetVendorSeverity sets the VendorSeverity field's value.
-func (s *PackageVulnerabilityDetails) SetVendorSeverity(v string) *PackageVulnerabilityDetails {
- s.VendorSeverity = &v
- return s
-}
-
-// SetVendorUpdatedAt sets the VendorUpdatedAt field's value.
-func (s *PackageVulnerabilityDetails) SetVendorUpdatedAt(v time.Time) *PackageVulnerabilityDetails {
- s.VendorUpdatedAt = &v
- return s
-}
-
-// SetVulnerabilityId sets the VulnerabilityId field's value.
-func (s *PackageVulnerabilityDetails) SetVulnerabilityId(v string) *PackageVulnerabilityDetails {
- s.VulnerabilityId = &v
- return s
-}
-
-// SetVulnerablePackages sets the VulnerablePackages field's value.
-func (s *PackageVulnerabilityDetails) SetVulnerablePackages(v []*VulnerablePackage) *PackageVulnerabilityDetails {
- s.VulnerablePackages = v
- return s
-}
-
-// The details of a pull through cache rule.
-type PullThroughCacheRule struct {
- _ struct{} `type:"structure"`
-
- // The date and time the pull through cache was created.
- CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
-
- // The Amazon ECR repository prefix associated with the pull through cache rule.
- EcrRepositoryPrefix *string `locationName:"ecrRepositoryPrefix" min:"2" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry the pull
- // through cache rule is associated with.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The upstream registry URL associated with the pull through cache rule.
- UpstreamRegistryUrl *string `locationName:"upstreamRegistryUrl" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRule) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRule) GoString() string {
- return s.String()
-}
-
-// SetCreatedAt sets the CreatedAt field's value.
-func (s *PullThroughCacheRule) SetCreatedAt(v time.Time) *PullThroughCacheRule {
- s.CreatedAt = &v
- return s
-}
-
-// SetEcrRepositoryPrefix sets the EcrRepositoryPrefix field's value.
-func (s *PullThroughCacheRule) SetEcrRepositoryPrefix(v string) *PullThroughCacheRule {
- s.EcrRepositoryPrefix = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PullThroughCacheRule) SetRegistryId(v string) *PullThroughCacheRule {
- s.RegistryId = &v
- return s
-}
-
-// SetUpstreamRegistryUrl sets the UpstreamRegistryUrl field's value.
-func (s *PullThroughCacheRule) SetUpstreamRegistryUrl(v string) *PullThroughCacheRule {
- s.UpstreamRegistryUrl = &v
- return s
-}
-
-// A pull through cache rule with these settings already exists for the private
-// registry.
-type PullThroughCacheRuleAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRuleAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRuleAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorPullThroughCacheRuleAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &PullThroughCacheRuleAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *PullThroughCacheRuleAlreadyExistsException) Code() string {
- return "PullThroughCacheRuleAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *PullThroughCacheRuleAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PullThroughCacheRuleAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *PullThroughCacheRuleAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *PullThroughCacheRuleAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *PullThroughCacheRuleAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The pull through cache rule was not found. Specify a valid pull through cache
-// rule and try again.
-type PullThroughCacheRuleNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRuleNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PullThroughCacheRuleNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorPullThroughCacheRuleNotFoundException(v protocol.ResponseMetadata) error {
- return &PullThroughCacheRuleNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *PullThroughCacheRuleNotFoundException) Code() string {
- return "PullThroughCacheRuleNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *PullThroughCacheRuleNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PullThroughCacheRuleNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *PullThroughCacheRuleNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *PullThroughCacheRuleNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *PullThroughCacheRuleNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type PutImageInput struct {
- _ struct{} `type:"structure"`
-
- // The image digest of the image manifest corresponding to the image.
- ImageDigest *string `locationName:"imageDigest" type:"string"`
-
- // The image manifest corresponding to the image to be uploaded.
- //
- // ImageManifest is a required field
- ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"`
-
- // The media type of the image manifest. If you push an image manifest that
- // does not contain the mediaType field, you must specify the imageManifestMediaType
- // in the request.
- ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
-
- // The tag to associate with the image. This parameter is required for images
- // that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative
- // (OCI) formats.
- ImageTag *string `locationName:"imageTag" min:"1" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to put the image. If you do not specify a registry,
- // the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository in which to put the image.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutImageInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutImageInput"}
- if s.ImageManifest == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageManifest"))
- }
- if s.ImageManifest != nil && len(*s.ImageManifest) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageManifest", 1))
- }
- if s.ImageTag != nil && len(*s.ImageTag) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageDigest sets the ImageDigest field's value.
-func (s *PutImageInput) SetImageDigest(v string) *PutImageInput {
- s.ImageDigest = &v
- return s
-}
-
-// SetImageManifest sets the ImageManifest field's value.
-func (s *PutImageInput) SetImageManifest(v string) *PutImageInput {
- s.ImageManifest = &v
- return s
-}
-
-// SetImageManifestMediaType sets the ImageManifestMediaType field's value.
-func (s *PutImageInput) SetImageManifestMediaType(v string) *PutImageInput {
- s.ImageManifestMediaType = &v
- return s
-}
-
-// SetImageTag sets the ImageTag field's value.
-func (s *PutImageInput) SetImageTag(v string) *PutImageInput {
- s.ImageTag = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutImageInput) SetRegistryId(v string) *PutImageInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput {
- s.RepositoryName = &v
- return s
-}
-
-type PutImageOutput struct {
- _ struct{} `type:"structure"`
-
- // Details of the image uploaded.
- Image *Image `locationName:"image" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageOutput) GoString() string {
- return s.String()
-}
-
-// SetImage sets the Image field's value.
-func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput {
- s.Image = v
- return s
-}
-
-type PutImageScanningConfigurationInput struct {
- _ struct{} `type:"structure"`
-
- // The image scanning configuration for the repository. This setting determines
- // whether images are scanned for known vulnerabilities after being pushed to
- // the repository.
- //
- // ImageScanningConfiguration is a required field
- ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to update the image scanning configuration setting.
- // If you do not specify a registry, the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository in which to update the image scanning configuration
- // setting.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageScanningConfigurationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageScanningConfigurationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutImageScanningConfigurationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutImageScanningConfigurationInput"}
- if s.ImageScanningConfiguration == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageScanningConfiguration"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
-func (s *PutImageScanningConfigurationInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationInput {
- s.ImageScanningConfiguration = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutImageScanningConfigurationInput) SetRegistryId(v string) *PutImageScanningConfigurationInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutImageScanningConfigurationInput) SetRepositoryName(v string) *PutImageScanningConfigurationInput {
- s.RepositoryName = &v
- return s
-}
-
-type PutImageScanningConfigurationOutput struct {
- _ struct{} `type:"structure"`
-
- // The image scanning configuration setting for the repository.
- ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageScanningConfigurationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageScanningConfigurationOutput) GoString() string {
- return s.String()
-}
-
-// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
-func (s *PutImageScanningConfigurationOutput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationOutput {
- s.ImageScanningConfiguration = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutImageScanningConfigurationOutput) SetRegistryId(v string) *PutImageScanningConfigurationOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutImageScanningConfigurationOutput) SetRepositoryName(v string) *PutImageScanningConfigurationOutput {
- s.RepositoryName = &v
- return s
-}
-
-type PutImageTagMutabilityInput struct {
- _ struct{} `type:"structure"`
-
- // The tag mutability setting for the repository. If MUTABLE is specified, image
- // tags can be overwritten. If IMMUTABLE is specified, all image tags within
- // the repository will be immutable which will prevent them from being overwritten.
- //
- // ImageTagMutability is a required field
- ImageTagMutability *string `locationName:"imageTagMutability" type:"string" required:"true" enum:"ImageTagMutability"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to update the image tag mutability settings. If you
- // do not specify a registry, the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository in which to update the image tag mutability settings.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageTagMutabilityInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageTagMutabilityInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutImageTagMutabilityInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutImageTagMutabilityInput"}
- if s.ImageTagMutability == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageTagMutability"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageTagMutability sets the ImageTagMutability field's value.
-func (s *PutImageTagMutabilityInput) SetImageTagMutability(v string) *PutImageTagMutabilityInput {
- s.ImageTagMutability = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutImageTagMutabilityInput) SetRegistryId(v string) *PutImageTagMutabilityInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutImageTagMutabilityInput) SetRepositoryName(v string) *PutImageTagMutabilityInput {
- s.RepositoryName = &v
- return s
-}
-
-type PutImageTagMutabilityOutput struct {
- _ struct{} `type:"structure"`
-
- // The image tag mutability setting for the repository.
- ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageTagMutabilityOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutImageTagMutabilityOutput) GoString() string {
- return s.String()
-}
-
-// SetImageTagMutability sets the ImageTagMutability field's value.
-func (s *PutImageTagMutabilityOutput) SetImageTagMutability(v string) *PutImageTagMutabilityOutput {
- s.ImageTagMutability = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutImageTagMutabilityOutput) SetRegistryId(v string) *PutImageTagMutabilityOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutImageTagMutabilityOutput) SetRepositoryName(v string) *PutImageTagMutabilityOutput {
- s.RepositoryName = &v
- return s
-}
-
-type PutLifecyclePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy text to apply to the repository.
- //
- // LifecyclePolicyText is a required field
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to receive the policy.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutLifecyclePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutLifecyclePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutLifecyclePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"}
- if s.LifecyclePolicyText == nil {
- invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText"))
- }
- if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 {
- invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput {
- s.RepositoryName = &v
- return s
-}
-
-type PutLifecyclePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy text.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutLifecyclePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutLifecyclePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-type PutRegistryPolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The JSON policy text to apply to your registry. The policy text follows the
- // same format as IAM policy text. For more information, see Registry permissions
- // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html)
- // in the Amazon Elastic Container Registry User Guide.
- //
- // PolicyText is a required field
- PolicyText *string `locationName:"policyText" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryPolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutRegistryPolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutRegistryPolicyInput"}
- if s.PolicyText == nil {
- invalidParams.Add(request.NewErrParamRequired("PolicyText"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *PutRegistryPolicyInput) SetPolicyText(v string) *PutRegistryPolicyInput {
- s.PolicyText = &v
- return s
-}
-
-type PutRegistryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON policy text for your registry.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The registry ID.
- RegistryId *string `locationName:"registryId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *PutRegistryPolicyOutput) SetPolicyText(v string) *PutRegistryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *PutRegistryPolicyOutput) SetRegistryId(v string) *PutRegistryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-type PutRegistryScanningConfigurationInput struct {
- _ struct{} `type:"structure"`
-
- // The scanning rules to use for the registry. A scanning rule is used to determine
- // which repository filters are used and at what frequency scanning will occur.
- Rules []*RegistryScanningRule `locationName:"rules" type:"list"`
-
- // The scanning type to set for the registry.
- //
- // When a registry scanning configuration is not defined, by default the BASIC
- // scan type is used. When basic scanning is used, you may specify filters to
- // determine which individual repositories, or all repositories, are scanned
- // when new images are pushed to those repositories. Alternatively, you can
- // do manual scans of images with basic scanning.
- //
- // When the ENHANCED scan type is set, Amazon Inspector provides automated vulnerability
- // scanning. You may choose between continuous scanning or scan on push and
- // you may specify filters to determine which individual repositories, or all
- // repositories, are scanned.
- ScanType *string `locationName:"scanType" type:"string" enum:"ScanType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryScanningConfigurationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryScanningConfigurationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutRegistryScanningConfigurationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutRegistryScanningConfigurationInput"}
- if s.Rules != nil {
- for i, v := range s.Rules {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRules sets the Rules field's value.
-func (s *PutRegistryScanningConfigurationInput) SetRules(v []*RegistryScanningRule) *PutRegistryScanningConfigurationInput {
- s.Rules = v
- return s
-}
-
-// SetScanType sets the ScanType field's value.
-func (s *PutRegistryScanningConfigurationInput) SetScanType(v string) *PutRegistryScanningConfigurationInput {
- s.ScanType = &v
- return s
-}
-
-type PutRegistryScanningConfigurationOutput struct {
- _ struct{} `type:"structure"`
-
- // The scanning configuration for your registry.
- RegistryScanningConfiguration *RegistryScanningConfiguration `locationName:"registryScanningConfiguration" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryScanningConfigurationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRegistryScanningConfigurationOutput) GoString() string {
- return s.String()
-}
-
-// SetRegistryScanningConfiguration sets the RegistryScanningConfiguration field's value.
-func (s *PutRegistryScanningConfigurationOutput) SetRegistryScanningConfiguration(v *RegistryScanningConfiguration) *PutRegistryScanningConfigurationOutput {
- s.RegistryScanningConfiguration = v
- return s
-}
-
-type PutReplicationConfigurationInput struct {
- _ struct{} `type:"structure"`
-
- // An object representing the replication configuration for a registry.
- //
- // ReplicationConfiguration is a required field
- ReplicationConfiguration *ReplicationConfiguration `locationName:"replicationConfiguration" type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutReplicationConfigurationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutReplicationConfigurationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutReplicationConfigurationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutReplicationConfigurationInput"}
- if s.ReplicationConfiguration == nil {
- invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
- }
- if s.ReplicationConfiguration != nil {
- if err := s.ReplicationConfiguration.Validate(); err != nil {
- invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
-func (s *PutReplicationConfigurationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutReplicationConfigurationInput {
- s.ReplicationConfiguration = v
- return s
-}
-
-type PutReplicationConfigurationOutput struct {
- _ struct{} `type:"structure"`
-
- // The contents of the replication configuration for the registry.
- ReplicationConfiguration *ReplicationConfiguration `locationName:"replicationConfiguration" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutReplicationConfigurationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutReplicationConfigurationOutput) GoString() string {
- return s.String()
-}
-
-// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
-func (s *PutReplicationConfigurationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutReplicationConfigurationOutput {
- s.ReplicationConfiguration = v
- return s
-}
-
-// Details about the recommended course of action to remediate the finding.
-type Recommendation struct {
- _ struct{} `type:"structure"`
-
- // The recommended course of action to remediate the finding.
- Text *string `locationName:"text" type:"string"`
-
- // The URL address to the CVE remediation recommendations.
- Url *string `locationName:"url" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Recommendation) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Recommendation) GoString() string {
- return s.String()
-}
-
-// SetText sets the Text field's value.
-func (s *Recommendation) SetText(v string) *Recommendation {
- s.Text = &v
- return s
-}
-
-// SetUrl sets the Url field's value.
-func (s *Recommendation) SetUrl(v string) *Recommendation {
- s.Url = &v
- return s
-}
-
-// The manifest list is referencing an image that does not exist.
-type ReferencedImagesNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReferencedImagesNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReferencedImagesNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorReferencedImagesNotFoundException(v protocol.ResponseMetadata) error {
- return &ReferencedImagesNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ReferencedImagesNotFoundException) Code() string {
- return "ReferencedImagesNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ReferencedImagesNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ReferencedImagesNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ReferencedImagesNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ReferencedImagesNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ReferencedImagesNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The registry doesn't have an associated registry policy.
-type RegistryPolicyNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryPolicyNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryPolicyNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorRegistryPolicyNotFoundException(v protocol.ResponseMetadata) error {
- return &RegistryPolicyNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RegistryPolicyNotFoundException) Code() string {
- return "RegistryPolicyNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *RegistryPolicyNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RegistryPolicyNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *RegistryPolicyNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RegistryPolicyNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RegistryPolicyNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The scanning configuration for a private registry.
-type RegistryScanningConfiguration struct {
- _ struct{} `type:"structure"`
-
- // The scanning rules associated with the registry.
- Rules []*RegistryScanningRule `locationName:"rules" type:"list"`
-
- // The type of scanning configured for the registry.
- ScanType *string `locationName:"scanType" type:"string" enum:"ScanType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryScanningConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryScanningConfiguration) GoString() string {
- return s.String()
-}
-
-// SetRules sets the Rules field's value.
-func (s *RegistryScanningConfiguration) SetRules(v []*RegistryScanningRule) *RegistryScanningConfiguration {
- s.Rules = v
- return s
-}
-
-// SetScanType sets the ScanType field's value.
-func (s *RegistryScanningConfiguration) SetScanType(v string) *RegistryScanningConfiguration {
- s.ScanType = &v
- return s
-}
-
-// The details of a scanning rule for a private registry.
-type RegistryScanningRule struct {
- _ struct{} `type:"structure"`
-
- // The repository filters associated with the scanning configuration for a private
- // registry.
- //
- // RepositoryFilters is a required field
- RepositoryFilters []*ScanningRepositoryFilter `locationName:"repositoryFilters" type:"list" required:"true"`
-
- // The frequency that scans are performed at for a private registry. When the
- // ENHANCED scan type is specified, the supported scan frequencies are CONTINUOUS_SCAN
- // and SCAN_ON_PUSH. When the BASIC scan type is specified, the SCAN_ON_PUSH
- // and MANUAL scan frequencies are supported.
- //
- // ScanFrequency is a required field
- ScanFrequency *string `locationName:"scanFrequency" type:"string" required:"true" enum:"ScanFrequency"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryScanningRule) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegistryScanningRule) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RegistryScanningRule) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RegistryScanningRule"}
- if s.RepositoryFilters == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryFilters"))
- }
- if s.ScanFrequency == nil {
- invalidParams.Add(request.NewErrParamRequired("ScanFrequency"))
- }
- if s.RepositoryFilters != nil {
- for i, v := range s.RepositoryFilters {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RepositoryFilters", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRepositoryFilters sets the RepositoryFilters field's value.
-func (s *RegistryScanningRule) SetRepositoryFilters(v []*ScanningRepositoryFilter) *RegistryScanningRule {
- s.RepositoryFilters = v
- return s
-}
-
-// SetScanFrequency sets the ScanFrequency field's value.
-func (s *RegistryScanningRule) SetScanFrequency(v string) *RegistryScanningRule {
- s.ScanFrequency = &v
- return s
-}
-
-// Information on how to remediate a finding.
-type Remediation struct {
- _ struct{} `type:"structure"`
-
- // An object that contains information about the recommended course of action
- // to remediate the finding.
- Recommendation *Recommendation `locationName:"recommendation" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Remediation) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Remediation) GoString() string {
- return s.String()
-}
-
-// SetRecommendation sets the Recommendation field's value.
-func (s *Remediation) SetRecommendation(v *Recommendation) *Remediation {
- s.Recommendation = v
- return s
-}
-
-// The replication configuration for a registry.
-type ReplicationConfiguration struct {
- _ struct{} `type:"structure"`
-
- // An array of objects representing the replication destinations and repository
- // filters for a replication configuration.
- //
- // Rules is a required field
- Rules []*ReplicationRule `locationName:"rules" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationConfiguration) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationConfiguration) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
- if s.Rules == nil {
- invalidParams.Add(request.NewErrParamRequired("Rules"))
- }
- if s.Rules != nil {
- for i, v := range s.Rules {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRules sets the Rules field's value.
-func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
- s.Rules = v
- return s
-}
-
-// An array of objects representing the destination for a replication rule.
-type ReplicationDestination struct {
- _ struct{} `type:"structure"`
-
- // The Region to replicate to.
- //
- // Region is a required field
- Region *string `locationName:"region" min:"2" type:"string" required:"true"`
-
- // The Amazon Web Services account ID of the Amazon ECR private registry to
- // replicate to. When configuring cross-Region replication within your own registry,
- // specify your own account ID.
- //
- // RegistryId is a required field
- RegistryId *string `locationName:"registryId" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationDestination) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationDestination) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationDestination) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicationDestination"}
- if s.Region == nil {
- invalidParams.Add(request.NewErrParamRequired("Region"))
- }
- if s.Region != nil && len(*s.Region) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("Region", 2))
- }
- if s.RegistryId == nil {
- invalidParams.Add(request.NewErrParamRequired("RegistryId"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegion sets the Region field's value.
-func (s *ReplicationDestination) SetRegion(v string) *ReplicationDestination {
- s.Region = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *ReplicationDestination) SetRegistryId(v string) *ReplicationDestination {
- s.RegistryId = &v
- return s
-}
-
-// An array of objects representing the replication destinations and repository
-// filters for a replication configuration.
-type ReplicationRule struct {
- _ struct{} `type:"structure"`
-
- // An array of objects representing the destination for a replication rule.
- //
- // Destinations is a required field
- Destinations []*ReplicationDestination `locationName:"destinations" type:"list" required:"true"`
-
- // An array of objects representing the filters for a replication rule. Specifying
- // a repository filter for a replication rule provides a method for controlling
- // which repositories in a private registry are replicated.
- RepositoryFilters []*RepositoryFilter `locationName:"repositoryFilters" min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationRule) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationRule) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationRule) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
- if s.Destinations == nil {
- invalidParams.Add(request.NewErrParamRequired("Destinations"))
- }
- if s.RepositoryFilters != nil && len(s.RepositoryFilters) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryFilters", 1))
- }
- if s.Destinations != nil {
- for i, v := range s.Destinations {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.RepositoryFilters != nil {
- for i, v := range s.RepositoryFilters {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RepositoryFilters", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDestinations sets the Destinations field's value.
-func (s *ReplicationRule) SetDestinations(v []*ReplicationDestination) *ReplicationRule {
- s.Destinations = v
- return s
-}
-
-// SetRepositoryFilters sets the RepositoryFilters field's value.
-func (s *ReplicationRule) SetRepositoryFilters(v []*RepositoryFilter) *ReplicationRule {
- s.RepositoryFilters = v
- return s
-}
-
-// An object representing a repository.
-type Repository struct {
- _ struct{} `type:"structure"`
-
- // The date and time, in JavaScript date format, when the repository was created.
- CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
-
- // The encryption configuration for the repository. This determines how the
- // contents of your repository are encrypted at rest.
- EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"`
-
- // The image scanning configuration for a repository.
- ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"`
-
- // The tag mutability setting for the repository.
- ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains
- // the arn:aws:ecr namespace, followed by the region of the repository, Amazon
- // Web Services account ID of the repository owner, repository namespace, and
- // repository name. For example, arn:aws:ecr:region:012345678910:repository/test.
- RepositoryArn *string `locationName:"repositoryArn" type:"string"`
-
- // The name of the repository.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The URI for the repository. You can use this URI for container image push
- // and pull operations.
- RepositoryUri *string `locationName:"repositoryUri" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Repository) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Repository) GoString() string {
- return s.String()
-}
-
-// SetCreatedAt sets the CreatedAt field's value.
-func (s *Repository) SetCreatedAt(v time.Time) *Repository {
- s.CreatedAt = &v
- return s
-}
-
-// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
-func (s *Repository) SetEncryptionConfiguration(v *EncryptionConfiguration) *Repository {
- s.EncryptionConfiguration = v
- return s
-}
-
-// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
-func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository {
- s.ImageScanningConfiguration = v
- return s
-}
-
-// SetImageTagMutability sets the ImageTagMutability field's value.
-func (s *Repository) SetImageTagMutability(v string) *Repository {
- s.ImageTagMutability = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *Repository) SetRegistryId(v string) *Repository {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryArn sets the RepositoryArn field's value.
-func (s *Repository) SetRepositoryArn(v string) *Repository {
- s.RepositoryArn = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *Repository) SetRepositoryName(v string) *Repository {
- s.RepositoryName = &v
- return s
-}
-
-// SetRepositoryUri sets the RepositoryUri field's value.
-func (s *Repository) SetRepositoryUri(v string) *Repository {
- s.RepositoryUri = &v
- return s
-}
-
-// The specified repository already exists in the specified registry.
-type RepositoryAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorRepositoryAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &RepositoryAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RepositoryAlreadyExistsException) Code() string {
- return "RepositoryAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *RepositoryAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RepositoryAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *RepositoryAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RepositoryAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RepositoryAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The filter settings used with image replication. Specifying a repository
-// filter to a replication rule provides a method for controlling which repositories
-// in a private registry are replicated. If no repository filter is specified,
-// all images in the repository are replicated.
-type RepositoryFilter struct {
- _ struct{} `type:"structure"`
-
- // The repository filter details. When the PREFIX_MATCH filter type is specified,
- // this value is required and should be the repository name prefix to configure
- // replication for.
- //
- // Filter is a required field
- Filter *string `locationName:"filter" min:"2" type:"string" required:"true"`
-
- // The repository filter type. The only supported value is PREFIX_MATCH, which
- // is a repository name prefix specified with the filter parameter.
- //
- // FilterType is a required field
- FilterType *string `locationName:"filterType" type:"string" required:"true" enum:"RepositoryFilterType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryFilter) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryFilter) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RepositoryFilter) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RepositoryFilter"}
- if s.Filter == nil {
- invalidParams.Add(request.NewErrParamRequired("Filter"))
- }
- if s.Filter != nil && len(*s.Filter) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("Filter", 2))
- }
- if s.FilterType == nil {
- invalidParams.Add(request.NewErrParamRequired("FilterType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *RepositoryFilter) SetFilter(v string) *RepositoryFilter {
- s.Filter = &v
- return s
-}
-
-// SetFilterType sets the FilterType field's value.
-func (s *RepositoryFilter) SetFilterType(v string) *RepositoryFilter {
- s.FilterType = &v
- return s
-}
-
-// The specified repository contains images. To delete a repository that contains
-// images, you must force the deletion with the force parameter.
-type RepositoryNotEmptyException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryNotEmptyException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryNotEmptyException) GoString() string {
- return s.String()
-}
-
-func newErrorRepositoryNotEmptyException(v protocol.ResponseMetadata) error {
- return &RepositoryNotEmptyException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RepositoryNotEmptyException) Code() string {
- return "RepositoryNotEmptyException"
-}
-
-// Message returns the exception's message.
-func (s *RepositoryNotEmptyException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RepositoryNotEmptyException) OrigErr() error {
- return nil
-}
-
-func (s *RepositoryNotEmptyException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RepositoryNotEmptyException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RepositoryNotEmptyException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified repository could not be found. Check the spelling of the specified
-// repository and ensure that you are performing operations on the correct registry.
-type RepositoryNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorRepositoryNotFoundException(v protocol.ResponseMetadata) error {
- return &RepositoryNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RepositoryNotFoundException) Code() string {
- return "RepositoryNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *RepositoryNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RepositoryNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *RepositoryNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RepositoryNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RepositoryNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified repository and registry combination does not have an associated
-// repository policy.
-type RepositoryPolicyNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryPolicyNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryPolicyNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorRepositoryPolicyNotFoundException(v protocol.ResponseMetadata) error {
- return &RepositoryPolicyNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RepositoryPolicyNotFoundException) Code() string {
- return "RepositoryPolicyNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *RepositoryPolicyNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RepositoryPolicyNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *RepositoryPolicyNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RepositoryPolicyNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RepositoryPolicyNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The details of the scanning configuration for a repository.
-type RepositoryScanningConfiguration struct {
- _ struct{} `type:"structure"`
-
- // The scan filters applied to the repository.
- AppliedScanFilters []*ScanningRepositoryFilter `locationName:"appliedScanFilters" type:"list"`
-
- // The ARN of the repository.
- RepositoryArn *string `locationName:"repositoryArn" type:"string"`
-
- // The name of the repository.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The scan frequency for the repository.
- ScanFrequency *string `locationName:"scanFrequency" type:"string" enum:"ScanFrequency"`
-
- // Whether or not scan on push is configured for the repository.
- ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryScanningConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryScanningConfiguration) GoString() string {
- return s.String()
-}
-
-// SetAppliedScanFilters sets the AppliedScanFilters field's value.
-func (s *RepositoryScanningConfiguration) SetAppliedScanFilters(v []*ScanningRepositoryFilter) *RepositoryScanningConfiguration {
- s.AppliedScanFilters = v
- return s
-}
-
-// SetRepositoryArn sets the RepositoryArn field's value.
-func (s *RepositoryScanningConfiguration) SetRepositoryArn(v string) *RepositoryScanningConfiguration {
- s.RepositoryArn = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *RepositoryScanningConfiguration) SetRepositoryName(v string) *RepositoryScanningConfiguration {
- s.RepositoryName = &v
- return s
-}
-
-// SetScanFrequency sets the ScanFrequency field's value.
-func (s *RepositoryScanningConfiguration) SetScanFrequency(v string) *RepositoryScanningConfiguration {
- s.ScanFrequency = &v
- return s
-}
-
-// SetScanOnPush sets the ScanOnPush field's value.
-func (s *RepositoryScanningConfiguration) SetScanOnPush(v bool) *RepositoryScanningConfiguration {
- s.ScanOnPush = &v
- return s
-}
-
-// The details about any failures associated with the scanning configuration
-// of a repository.
-type RepositoryScanningConfigurationFailure struct {
- _ struct{} `type:"structure"`
-
- // The failure code.
- FailureCode *string `locationName:"failureCode" type:"string" enum:"ScanningConfigurationFailureCode"`
-
- // The reason for the failure.
- FailureReason *string `locationName:"failureReason" type:"string"`
-
- // The name of the repository.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryScanningConfigurationFailure) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RepositoryScanningConfigurationFailure) GoString() string {
- return s.String()
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *RepositoryScanningConfigurationFailure) SetFailureCode(v string) *RepositoryScanningConfigurationFailure {
- s.FailureCode = &v
- return s
-}
-
-// SetFailureReason sets the FailureReason field's value.
-func (s *RepositoryScanningConfigurationFailure) SetFailureReason(v string) *RepositoryScanningConfigurationFailure {
- s.FailureReason = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *RepositoryScanningConfigurationFailure) SetRepositoryName(v string) *RepositoryScanningConfigurationFailure {
- s.RepositoryName = &v
- return s
-}
-
-// Details about the resource involved in a finding.
-type Resource struct {
- _ struct{} `type:"structure"`
-
- // An object that contains details about the resource involved in a finding.
- Details *ResourceDetails `locationName:"details" type:"structure"`
-
- // The ID of the resource.
- Id *string `locationName:"id" type:"string"`
-
- // The tags attached to the resource.
- Tags map[string]*string `locationName:"tags" type:"map"`
-
- // The type of resource.
- Type *string `locationName:"type" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Resource) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Resource) GoString() string {
- return s.String()
-}
-
-// SetDetails sets the Details field's value.
-func (s *Resource) SetDetails(v *ResourceDetails) *Resource {
- s.Details = v
- return s
-}
-
-// SetId sets the Id field's value.
-func (s *Resource) SetId(v string) *Resource {
- s.Id = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *Resource) SetTags(v map[string]*string) *Resource {
- s.Tags = v
- return s
-}
-
-// SetType sets the Type field's value.
-func (s *Resource) SetType(v string) *Resource {
- s.Type = &v
- return s
-}
-
-// Contains details about the resource involved in the finding.
-type ResourceDetails struct {
- _ struct{} `type:"structure"`
-
- // An object that contains details about the Amazon ECR container image involved
- // in the finding.
- AwsEcrContainerImage *AwsEcrContainerImageDetails `locationName:"awsEcrContainerImage" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceDetails) GoString() string {
- return s.String()
-}
-
-// SetAwsEcrContainerImage sets the AwsEcrContainerImage field's value.
-func (s *ResourceDetails) SetAwsEcrContainerImage(v *AwsEcrContainerImageDetails) *ResourceDetails {
- s.AwsEcrContainerImage = v
- return s
-}
-
-// The specified image scan could not be found. Ensure that image scanning is
-// enabled on the repository and try again.
-type ScanNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorScanNotFoundException(v protocol.ResponseMetadata) error {
- return &ScanNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ScanNotFoundException) Code() string {
- return "ScanNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ScanNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ScanNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ScanNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ScanNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ScanNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The details of a scanning repository filter. For more information on how
-// to use filters, see Using filters (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters)
-// in the Amazon Elastic Container Registry User Guide.
-type ScanningRepositoryFilter struct {
- _ struct{} `type:"structure"`
-
- // The filter to use when scanning.
- //
- // Filter is a required field
- Filter *string `locationName:"filter" min:"1" type:"string" required:"true"`
-
- // The type associated with the filter.
- //
- // FilterType is a required field
- FilterType *string `locationName:"filterType" type:"string" required:"true" enum:"ScanningRepositoryFilterType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanningRepositoryFilter) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanningRepositoryFilter) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ScanningRepositoryFilter) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ScanningRepositoryFilter"}
- if s.Filter == nil {
- invalidParams.Add(request.NewErrParamRequired("Filter"))
- }
- if s.Filter != nil && len(*s.Filter) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Filter", 1))
- }
- if s.FilterType == nil {
- invalidParams.Add(request.NewErrParamRequired("FilterType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *ScanningRepositoryFilter) SetFilter(v string) *ScanningRepositoryFilter {
- s.Filter = &v
- return s
-}
-
-// SetFilterType sets the FilterType field's value.
-func (s *ScanningRepositoryFilter) SetFilterType(v string) *ScanningRepositoryFilter {
- s.FilterType = &v
- return s
-}
-
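A short sketch of the setter-and-Validate pattern generated for ScanningRepositoryFilter; the wildcard value comes from the ScanningRepositoryFilterType enum near the end of this file, and the upstream import path is assumed.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	// A wildcard filter that matches every repository name.
	filter := (&ecr.ScanningRepositoryFilter{}).
		SetFilter("*").
		SetFilterType(ecr.ScanningRepositoryFilterTypeWildcard)

	// Validate applies the required-field and min-length rules generated above.
	if err := filter.Validate(); err != nil {
		log.Fatalf("invalid scanning repository filter: %v", err)
	}
	log.Println("filter ok:", filter)
}
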
-// Information about the Amazon Inspector score given to a finding.
-type ScoreDetails struct {
- _ struct{} `type:"structure"`
-
- // An object that contains details about the CVSS score given to a finding.
- Cvss *CvssScoreDetails `locationName:"cvss" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScoreDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScoreDetails) GoString() string {
- return s.String()
-}
-
-// SetCvss sets the Cvss field's value.
-func (s *ScoreDetails) SetCvss(v *CvssScoreDetails) *ScoreDetails {
- s.Cvss = v
- return s
-}
-
-// These errors are usually caused by a server-side issue.
-type ServerException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ServerException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ServerException) GoString() string {
- return s.String()
-}
-
-func newErrorServerException(v protocol.ResponseMetadata) error {
- return &ServerException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ServerException) Code() string {
- return "ServerException"
-}
-
-// Message returns the exception's message.
-func (s *ServerException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ServerException) OrigErr() error {
- return nil
-}
-
-func (s *ServerException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *ServerException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *ServerException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type SetRepositoryPolicyInput struct {
- _ struct{} `type:"structure"`
-
-	// If the policy you are attempting to set on a repository would prevent you
-	// from setting another policy in the future, you must force the SetRepositoryPolicy
-	// operation. This is intended to prevent accidental repository lockouts.
- Force *bool `locationName:"force" type:"boolean"`
-
- // The JSON repository policy text to apply to the repository. For more information,
- // see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html)
- // in the Amazon Elastic Container Registry User Guide.
- //
- // PolicyText is a required field
- PolicyText *string `locationName:"policyText" type:"string" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to receive the policy.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetRepositoryPolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetRepositoryPolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *SetRepositoryPolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "SetRepositoryPolicyInput"}
- if s.PolicyText == nil {
- invalidParams.Add(request.NewErrParamRequired("PolicyText"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetForce sets the Force field's value.
-func (s *SetRepositoryPolicyInput) SetForce(v bool) *SetRepositoryPolicyInput {
- s.Force = &v
- return s
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *SetRepositoryPolicyInput) SetPolicyText(v string) *SetRepositoryPolicyInput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *SetRepositoryPolicyInput) SetRegistryId(v string) *SetRepositoryPolicyInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *SetRepositoryPolicyInput) SetRepositoryName(v string) *SetRepositoryPolicyInput {
- s.RepositoryName = &v
- return s
-}
-
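A minimal sketch of using SetRepositoryPolicyInput with its fluent setters and Validate before calling the service; the account ID, repository name, and policy document are placeholders, and the upstream import paths are assumed.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// An illustrative resource policy granting pull access to one account.
	policy := `{"Version":"2012-10-17","Statement":[{"Sid":"AllowPull","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},"Action":["ecr:GetDownloadUrlForLayer","ecr:BatchGetImage"]}]}`

	input := (&ecr.SetRepositoryPolicyInput{}).
		SetRepositoryName("my-repo").
		SetPolicyText(policy).
		SetForce(false) // keep the lockout protection described above

	if err := input.Validate(); err != nil {
		log.Fatalf("bad SetRepositoryPolicy input: %v", err)
	}
	out, err := svc.SetRepositoryPolicy(input)
	if err != nil {
		log.Fatalf("SetRepositoryPolicy failed: %v", err)
	}
	log.Printf("policy applied to %s", aws.StringValue(out.RepositoryName))
}
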
-type SetRepositoryPolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy text applied to the repository.
- PolicyText *string `locationName:"policyText" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetRepositoryPolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SetRepositoryPolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicyText sets the PolicyText field's value.
-func (s *SetRepositoryPolicyOutput) SetPolicyText(v string) *SetRepositoryPolicyOutput {
- s.PolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *SetRepositoryPolicyOutput) SetRegistryId(v string) *SetRepositoryPolicyOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPolicyOutput {
- s.RepositoryName = &v
- return s
-}
-
-type StartImageScanInput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- //
- // ImageId is a required field
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository in which to start an image scan request. If you do not specify
- // a registry, the default registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository that contains the images to scan.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartImageScanInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartImageScanInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StartImageScanInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "StartImageScanInput"}
- if s.ImageId == nil {
- invalidParams.Add(request.NewErrParamRequired("ImageId"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.ImageId != nil {
- if err := s.ImageId.Validate(); err != nil {
- invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *StartImageScanInput) SetImageId(v *ImageIdentifier) *StartImageScanInput {
- s.ImageId = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *StartImageScanInput) SetRegistryId(v string) *StartImageScanInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *StartImageScanInput) SetRepositoryName(v string) *StartImageScanInput {
- s.RepositoryName = &v
- return s
-}
-
-type StartImageScanOutput struct {
- _ struct{} `type:"structure"`
-
- // An object with identifying information for an image in an Amazon ECR repository.
- ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
-
- // The current state of the scan.
- ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartImageScanOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartImageScanOutput) GoString() string {
- return s.String()
-}
-
-// SetImageId sets the ImageId field's value.
-func (s *StartImageScanOutput) SetImageId(v *ImageIdentifier) *StartImageScanOutput {
- s.ImageId = v
- return s
-}
-
-// SetImageScanStatus sets the ImageScanStatus field's value.
-func (s *StartImageScanOutput) SetImageScanStatus(v *ImageScanStatus) *StartImageScanOutput {
- s.ImageScanStatus = v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *StartImageScanOutput) SetRegistryId(v string) *StartImageScanOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *StartImageScanOutput) SetRepositoryName(v string) *StartImageScanOutput {
- s.RepositoryName = &v
- return s
-}
-
-type StartLifecyclePolicyPreviewInput struct {
- _ struct{} `type:"structure"`
-
- // The policy to be evaluated against. If you do not specify a policy, the current
- // policy for the repository is used.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The Amazon Web Services account ID associated with the registry that contains
- // the repository. If you do not specify a registry, the default registry is
- // assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to be evaluated.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartLifecyclePolicyPreviewInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartLifecyclePolicyPreviewInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StartLifecyclePolicyPreviewInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"}
- if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 {
- invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput {
- s.RepositoryName = &v
- return s
-}
-
-type StartLifecyclePolicyPreviewOutput struct {
- _ struct{} `type:"structure"`
-
- // The JSON repository policy text.
- LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The status of the lifecycle policy preview request.
- Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartLifecyclePolicyPreviewOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartLifecyclePolicyPreviewOutput) GoString() string {
- return s.String()
-}
-
-// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
-func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput {
- s.LifecyclePolicyText = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput {
- s.RepositoryName = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput {
- s.Status = &v
- return s
-}
-
-// The metadata to apply to a resource to help you categorize and organize it.
-// Each tag consists of a key and a value, both of which you define. Tag keys
-// can have a maximum length of 128 characters, and tag values can have a
-// maximum length of 256 characters.
-type Tag struct {
- _ struct{} `type:"structure"`
-
-	// One part of a key-value pair that makes up a tag. A key is a general label
- // that acts like a category for more specific tag values.
- Key *string `type:"string"`
-
- // A value acts as a descriptor within a tag category (key).
- Value *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) GoString() string {
- return s.String()
-}
-
-// SetKey sets the Key field's value.
-func (s *Tag) SetKey(v string) *Tag {
- s.Key = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *Tag) SetValue(v string) *Tag {
- s.Value = &v
- return s
-}
-
-type TagResourceInput struct {
- _ struct{} `type:"structure"`
-
-	// The Amazon Resource Name (ARN) of the resource to which to add tags.
- // Currently, the only supported resource is an Amazon ECR repository.
- //
- // ResourceArn is a required field
- ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
-
-	// The tags to add to the resource. Each tag is a key-value pair. Tag keys
-	// can have a maximum length of 128 characters, and tag values can have a
-	// maximum length of 256 characters.
- //
- // Tags is a required field
- Tags []*Tag `locationName:"tags" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TagResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.Tags == nil {
- invalidParams.Add(request.NewErrParamRequired("Tags"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
- s.Tags = v
- return s
-}
-
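A small sketch of tagging a repository with the Tag and TagResourceInput types above; the ARN is a placeholder and the upstream import paths are assumed.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	input := &ecr.TagResourceInput{
		ResourceArn: aws.String("arn:aws:ecr:us-east-1:123456789012:repository/my-repo"),
		Tags: []*ecr.Tag{
			(&ecr.Tag{}).SetKey("team").SetValue("platform"),
			(&ecr.Tag{}).SetKey("env").SetValue("prod"),
		},
	}
	if err := input.Validate(); err != nil {
		log.Fatalf("bad TagResource input: %v", err)
	}
	if _, err := svc.TagResource(input); err != nil {
		log.Fatalf("TagResource failed: %v", err)
	}
	log.Println("tags applied")
}
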
-type TagResourceOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceOutput) GoString() string {
- return s.String()
-}
-
-// The list of tags on the repository is over the limit. The maximum number
-// of tags that can be applied to a repository is 50.
-type TooManyTagsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TooManyTagsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TooManyTagsException) GoString() string {
- return s.String()
-}
-
-func newErrorTooManyTagsException(v protocol.ResponseMetadata) error {
- return &TooManyTagsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TooManyTagsException) Code() string {
- return "TooManyTagsException"
-}
-
-// Message returns the exception's message.
-func (s *TooManyTagsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TooManyTagsException) OrigErr() error {
- return nil
-}
-
-func (s *TooManyTagsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *TooManyTagsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *TooManyTagsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The image is of a type that cannot be scanned.
-type UnsupportedImageTypeException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedImageTypeException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedImageTypeException) GoString() string {
- return s.String()
-}
-
-func newErrorUnsupportedImageTypeException(v protocol.ResponseMetadata) error {
- return &UnsupportedImageTypeException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UnsupportedImageTypeException) Code() string {
- return "UnsupportedImageTypeException"
-}
-
-// Message returns the exception's message.
-func (s *UnsupportedImageTypeException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UnsupportedImageTypeException) OrigErr() error {
- return nil
-}
-
-func (s *UnsupportedImageTypeException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *UnsupportedImageTypeException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *UnsupportedImageTypeException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified upstream registry isn't supported.
-type UnsupportedUpstreamRegistryException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedUpstreamRegistryException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedUpstreamRegistryException) GoString() string {
- return s.String()
-}
-
-func newErrorUnsupportedUpstreamRegistryException(v protocol.ResponseMetadata) error {
- return &UnsupportedUpstreamRegistryException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UnsupportedUpstreamRegistryException) Code() string {
- return "UnsupportedUpstreamRegistryException"
-}
-
-// Message returns the exception's message.
-func (s *UnsupportedUpstreamRegistryException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UnsupportedUpstreamRegistryException) OrigErr() error {
- return nil
-}
-
-func (s *UnsupportedUpstreamRegistryException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *UnsupportedUpstreamRegistryException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *UnsupportedUpstreamRegistryException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type UntagResourceInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the resource from which to remove tags.
- // Currently, the only supported resource is an Amazon ECR repository.
- //
- // ResourceArn is a required field
- ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
-
- // The keys of the tags to be removed.
- //
- // TagKeys is a required field
- TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UntagResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.TagKeys == nil {
- invalidParams.Add(request.NewErrParamRequired("TagKeys"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-// SetTagKeys sets the TagKeys field's value.
-func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
- s.TagKeys = v
- return s
-}
-
-type UntagResourceOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) GoString() string {
- return s.String()
-}
-
-type UploadLayerPartInput struct {
- _ struct{} `type:"structure"`
-
- // The base64-encoded layer part payload.
- // LayerPartBlob is automatically base64 encoded/decoded by the SDK.
- //
- // LayerPartBlob is a required field
- LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"`
-
-	// The position of the first byte of the layer part within the overall image
- // layer.
- //
- // PartFirstByte is a required field
- PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"`
-
- // The position of the last byte of the layer part within the overall image
- // layer.
- //
- // PartLastByte is a required field
- PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"`
-
- // The Amazon Web Services account ID associated with the registry to which
- // you are uploading layer parts. If you do not specify a registry, the default
- // registry is assumed.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The name of the repository to which you are uploading layer parts.
- //
- // RepositoryName is a required field
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
-
- // The upload ID from a previous InitiateLayerUpload operation to associate
- // with the layer part upload.
- //
- // UploadId is a required field
- UploadId *string `locationName:"uploadId" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadLayerPartInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadLayerPartInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UploadLayerPartInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UploadLayerPartInput"}
- if s.LayerPartBlob == nil {
- invalidParams.Add(request.NewErrParamRequired("LayerPartBlob"))
- }
- if s.PartFirstByte == nil {
- invalidParams.Add(request.NewErrParamRequired("PartFirstByte"))
- }
- if s.PartLastByte == nil {
- invalidParams.Add(request.NewErrParamRequired("PartLastByte"))
- }
- if s.RepositoryName == nil {
- invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
- }
- if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
- }
- if s.UploadId == nil {
- invalidParams.Add(request.NewErrParamRequired("UploadId"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetLayerPartBlob sets the LayerPartBlob field's value.
-func (s *UploadLayerPartInput) SetLayerPartBlob(v []byte) *UploadLayerPartInput {
- s.LayerPartBlob = v
- return s
-}
-
-// SetPartFirstByte sets the PartFirstByte field's value.
-func (s *UploadLayerPartInput) SetPartFirstByte(v int64) *UploadLayerPartInput {
- s.PartFirstByte = &v
- return s
-}
-
-// SetPartLastByte sets the PartLastByte field's value.
-func (s *UploadLayerPartInput) SetPartLastByte(v int64) *UploadLayerPartInput {
- s.PartLastByte = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *UploadLayerPartInput) SetRegistryId(v string) *UploadLayerPartInput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *UploadLayerPartInput) SetRepositoryName(v string) *UploadLayerPartInput {
- s.RepositoryName = &v
- return s
-}
-
-// SetUploadId sets the UploadId field's value.
-func (s *UploadLayerPartInput) SetUploadId(v string) *UploadLayerPartInput {
- s.UploadId = &v
- return s
-}
-
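The layer-part upload flow implied by this input type pairs an upload ID from a prior InitiateLayerUpload call with byte ranges of the layer blob. A rough sketch, with a toy single-part payload and the upstream import paths assumed:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	repo := "my-repo"

	// Start an upload; the returned UploadId ties the individual parts together.
	initOut, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
		RepositoryName: aws.String(repo),
	})
	if err != nil {
		log.Fatalf("InitiateLayerUpload failed: %v", err)
	}

	// A single toy part; real layers are uploaded in 5 MiB (or larger) parts.
	part := []byte("layer bytes would go here")
	input := (&ecr.UploadLayerPartInput{}).
		SetRepositoryName(repo).
		SetUploadId(aws.StringValue(initOut.UploadId)).
		SetLayerPartBlob(part).
		SetPartFirstByte(0).
		SetPartLastByte(int64(len(part) - 1))

	out, err := svc.UploadLayerPart(input)
	if err != nil {
		log.Fatalf("UploadLayerPart failed: %v", err)
	}
	log.Printf("last byte received: %d", aws.Int64Value(out.LastByteReceived))
}
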
-type UploadLayerPartOutput struct {
- _ struct{} `type:"structure"`
-
- // The integer value of the last byte received in the request.
- LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"`
-
- // The registry ID associated with the request.
- RegistryId *string `locationName:"registryId" type:"string"`
-
- // The repository name associated with the request.
- RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
-
- // The upload ID associated with the request.
- UploadId *string `locationName:"uploadId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadLayerPartOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadLayerPartOutput) GoString() string {
- return s.String()
-}
-
-// SetLastByteReceived sets the LastByteReceived field's value.
-func (s *UploadLayerPartOutput) SetLastByteReceived(v int64) *UploadLayerPartOutput {
- s.LastByteReceived = &v
- return s
-}
-
-// SetRegistryId sets the RegistryId field's value.
-func (s *UploadLayerPartOutput) SetRegistryId(v string) *UploadLayerPartOutput {
- s.RegistryId = &v
- return s
-}
-
-// SetRepositoryName sets the RepositoryName field's value.
-func (s *UploadLayerPartOutput) SetRepositoryName(v string) *UploadLayerPartOutput {
- s.RepositoryName = &v
- return s
-}
-
-// SetUploadId sets the UploadId field's value.
-func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput {
- s.UploadId = &v
- return s
-}
-
-// The upload could not be found, or the specified upload ID is not valid for
-// this repository.
-type UploadNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The error message associated with the exception.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UploadNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorUploadNotFoundException(v protocol.ResponseMetadata) error {
- return &UploadNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UploadNotFoundException) Code() string {
- return "UploadNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *UploadNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UploadNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *UploadNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *UploadNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *UploadNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// There was an exception validating this request.
-type ValidationException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ValidationException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ValidationException) GoString() string {
- return s.String()
-}
-
-func newErrorValidationException(v protocol.ResponseMetadata) error {
- return &ValidationException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ValidationException) Code() string {
- return "ValidationException"
-}
-
-// Message returns the exception's message.
-func (s *ValidationException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ValidationException) OrigErr() error {
- return nil
-}
-
-func (s *ValidationException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *ValidationException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *ValidationException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Information on the vulnerable package identified by a finding.
-type VulnerablePackage struct {
- _ struct{} `type:"structure"`
-
- // The architecture of the vulnerable package.
- Arch *string `locationName:"arch" type:"string"`
-
- // The epoch of the vulnerable package.
- Epoch *int64 `locationName:"epoch" type:"integer"`
-
- // The file path of the vulnerable package.
- FilePath *string `locationName:"filePath" type:"string"`
-
- // The name of the vulnerable package.
- Name *string `locationName:"name" type:"string"`
-
- // The package manager of the vulnerable package.
- PackageManager *string `locationName:"packageManager" type:"string"`
-
- // The release of the vulnerable package.
- Release *string `locationName:"release" type:"string"`
-
- // The source layer hash of the vulnerable package.
- SourceLayerHash *string `locationName:"sourceLayerHash" type:"string"`
-
- // The version of the vulnerable package.
- Version *string `locationName:"version" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s VulnerablePackage) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s VulnerablePackage) GoString() string {
- return s.String()
-}
-
-// SetArch sets the Arch field's value.
-func (s *VulnerablePackage) SetArch(v string) *VulnerablePackage {
- s.Arch = &v
- return s
-}
-
-// SetEpoch sets the Epoch field's value.
-func (s *VulnerablePackage) SetEpoch(v int64) *VulnerablePackage {
- s.Epoch = &v
- return s
-}
-
-// SetFilePath sets the FilePath field's value.
-func (s *VulnerablePackage) SetFilePath(v string) *VulnerablePackage {
- s.FilePath = &v
- return s
-}
-
-// SetName sets the Name field's value.
-func (s *VulnerablePackage) SetName(v string) *VulnerablePackage {
- s.Name = &v
- return s
-}
-
-// SetPackageManager sets the PackageManager field's value.
-func (s *VulnerablePackage) SetPackageManager(v string) *VulnerablePackage {
- s.PackageManager = &v
- return s
-}
-
-// SetRelease sets the Release field's value.
-func (s *VulnerablePackage) SetRelease(v string) *VulnerablePackage {
- s.Release = &v
- return s
-}
-
-// SetSourceLayerHash sets the SourceLayerHash field's value.
-func (s *VulnerablePackage) SetSourceLayerHash(v string) *VulnerablePackage {
- s.SourceLayerHash = &v
- return s
-}
-
-// SetVersion sets the Version field's value.
-func (s *VulnerablePackage) SetVersion(v string) *VulnerablePackage {
- s.Version = &v
- return s
-}
-
-const (
- // EncryptionTypeAes256 is a EncryptionType enum value
- EncryptionTypeAes256 = "AES256"
-
- // EncryptionTypeKms is a EncryptionType enum value
- EncryptionTypeKms = "KMS"
-)
-
-// EncryptionType_Values returns all elements of the EncryptionType enum
-func EncryptionType_Values() []string {
- return []string{
- EncryptionTypeAes256,
- EncryptionTypeKms,
- }
-}
-
-const (
- // FindingSeverityInformational is a FindingSeverity enum value
- FindingSeverityInformational = "INFORMATIONAL"
-
- // FindingSeverityLow is a FindingSeverity enum value
- FindingSeverityLow = "LOW"
-
- // FindingSeverityMedium is a FindingSeverity enum value
- FindingSeverityMedium = "MEDIUM"
-
- // FindingSeverityHigh is a FindingSeverity enum value
- FindingSeverityHigh = "HIGH"
-
- // FindingSeverityCritical is a FindingSeverity enum value
- FindingSeverityCritical = "CRITICAL"
-
- // FindingSeverityUndefined is a FindingSeverity enum value
- FindingSeverityUndefined = "UNDEFINED"
-)
-
-// FindingSeverity_Values returns all elements of the FindingSeverity enum
-func FindingSeverity_Values() []string {
- return []string{
- FindingSeverityInformational,
- FindingSeverityLow,
- FindingSeverityMedium,
- FindingSeverityHigh,
- FindingSeverityCritical,
- FindingSeverityUndefined,
- }
-}
-
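A tiny sketch of how the generated *_Values helpers are typically used, e.g. checking a configured severity against the FindingSeverity enum; the upstream import path is assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ecr"
)

// isValidSeverity reports whether s is one of the generated FindingSeverity values.
func isValidSeverity(s string) bool {
	for _, v := range ecr.FindingSeverity_Values() {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidSeverity("HIGH"))   // true
	fmt.Println(isValidSeverity("SEVERE")) // false
}
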
-const (
- // ImageActionTypeExpire is a ImageActionType enum value
- ImageActionTypeExpire = "EXPIRE"
-)
-
-// ImageActionType_Values returns all elements of the ImageActionType enum
-func ImageActionType_Values() []string {
- return []string{
- ImageActionTypeExpire,
- }
-}
-
-const (
- // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value
- ImageFailureCodeInvalidImageDigest = "InvalidImageDigest"
-
- // ImageFailureCodeInvalidImageTag is a ImageFailureCode enum value
- ImageFailureCodeInvalidImageTag = "InvalidImageTag"
-
- // ImageFailureCodeImageTagDoesNotMatchDigest is a ImageFailureCode enum value
- ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest"
-
- // ImageFailureCodeImageNotFound is a ImageFailureCode enum value
- ImageFailureCodeImageNotFound = "ImageNotFound"
-
- // ImageFailureCodeMissingDigestAndTag is a ImageFailureCode enum value
- ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag"
-
- // ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value
- ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList"
-
- // ImageFailureCodeKmsError is a ImageFailureCode enum value
- ImageFailureCodeKmsError = "KmsError"
-)
-
-// ImageFailureCode_Values returns all elements of the ImageFailureCode enum
-func ImageFailureCode_Values() []string {
- return []string{
- ImageFailureCodeInvalidImageDigest,
- ImageFailureCodeInvalidImageTag,
- ImageFailureCodeImageTagDoesNotMatchDigest,
- ImageFailureCodeImageNotFound,
- ImageFailureCodeMissingDigestAndTag,
- ImageFailureCodeImageReferencedByManifestList,
- ImageFailureCodeKmsError,
- }
-}
-
-const (
- // ImageTagMutabilityMutable is a ImageTagMutability enum value
- ImageTagMutabilityMutable = "MUTABLE"
-
- // ImageTagMutabilityImmutable is a ImageTagMutability enum value
- ImageTagMutabilityImmutable = "IMMUTABLE"
-)
-
-// ImageTagMutability_Values returns all elements of the ImageTagMutability enum
-func ImageTagMutability_Values() []string {
- return []string{
- ImageTagMutabilityMutable,
- ImageTagMutabilityImmutable,
- }
-}
-
-const (
- // LayerAvailabilityAvailable is a LayerAvailability enum value
- LayerAvailabilityAvailable = "AVAILABLE"
-
- // LayerAvailabilityUnavailable is a LayerAvailability enum value
- LayerAvailabilityUnavailable = "UNAVAILABLE"
-)
-
-// LayerAvailability_Values returns all elements of the LayerAvailability enum
-func LayerAvailability_Values() []string {
- return []string{
- LayerAvailabilityAvailable,
- LayerAvailabilityUnavailable,
- }
-}
-
-const (
- // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value
- LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest"
-
- // LayerFailureCodeMissingLayerDigest is a LayerFailureCode enum value
- LayerFailureCodeMissingLayerDigest = "MissingLayerDigest"
-)
-
-// LayerFailureCode_Values returns all elements of the LayerFailureCode enum
-func LayerFailureCode_Values() []string {
- return []string{
- LayerFailureCodeInvalidLayerDigest,
- LayerFailureCodeMissingLayerDigest,
- }
-}
-
-const (
- // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value
- LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS"
-
- // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value
- LifecyclePolicyPreviewStatusComplete = "COMPLETE"
-
- // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value
- LifecyclePolicyPreviewStatusExpired = "EXPIRED"
-
- // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value
- LifecyclePolicyPreviewStatusFailed = "FAILED"
-)
-
-// LifecyclePolicyPreviewStatus_Values returns all elements of the LifecyclePolicyPreviewStatus enum
-func LifecyclePolicyPreviewStatus_Values() []string {
- return []string{
- LifecyclePolicyPreviewStatusInProgress,
- LifecyclePolicyPreviewStatusComplete,
- LifecyclePolicyPreviewStatusExpired,
- LifecyclePolicyPreviewStatusFailed,
- }
-}
-
-const (
- // ReplicationStatusInProgress is a ReplicationStatus enum value
- ReplicationStatusInProgress = "IN_PROGRESS"
-
- // ReplicationStatusComplete is a ReplicationStatus enum value
- ReplicationStatusComplete = "COMPLETE"
-
- // ReplicationStatusFailed is a ReplicationStatus enum value
- ReplicationStatusFailed = "FAILED"
-)
-
-// ReplicationStatus_Values returns all elements of the ReplicationStatus enum
-func ReplicationStatus_Values() []string {
- return []string{
- ReplicationStatusInProgress,
- ReplicationStatusComplete,
- ReplicationStatusFailed,
- }
-}
-
-const (
- // RepositoryFilterTypePrefixMatch is a RepositoryFilterType enum value
- RepositoryFilterTypePrefixMatch = "PREFIX_MATCH"
-)
-
-// RepositoryFilterType_Values returns all elements of the RepositoryFilterType enum
-func RepositoryFilterType_Values() []string {
- return []string{
- RepositoryFilterTypePrefixMatch,
- }
-}
-
-const (
- // ScanFrequencyScanOnPush is a ScanFrequency enum value
- ScanFrequencyScanOnPush = "SCAN_ON_PUSH"
-
- // ScanFrequencyContinuousScan is a ScanFrequency enum value
- ScanFrequencyContinuousScan = "CONTINUOUS_SCAN"
-
- // ScanFrequencyManual is a ScanFrequency enum value
- ScanFrequencyManual = "MANUAL"
-)
-
-// ScanFrequency_Values returns all elements of the ScanFrequency enum
-func ScanFrequency_Values() []string {
- return []string{
- ScanFrequencyScanOnPush,
- ScanFrequencyContinuousScan,
- ScanFrequencyManual,
- }
-}
-
-const (
- // ScanStatusInProgress is a ScanStatus enum value
- ScanStatusInProgress = "IN_PROGRESS"
-
- // ScanStatusComplete is a ScanStatus enum value
- ScanStatusComplete = "COMPLETE"
-
- // ScanStatusFailed is a ScanStatus enum value
- ScanStatusFailed = "FAILED"
-
- // ScanStatusUnsupportedImage is a ScanStatus enum value
- ScanStatusUnsupportedImage = "UNSUPPORTED_IMAGE"
-
- // ScanStatusActive is a ScanStatus enum value
- ScanStatusActive = "ACTIVE"
-
- // ScanStatusPending is a ScanStatus enum value
- ScanStatusPending = "PENDING"
-
- // ScanStatusScanEligibilityExpired is a ScanStatus enum value
- ScanStatusScanEligibilityExpired = "SCAN_ELIGIBILITY_EXPIRED"
-
- // ScanStatusFindingsUnavailable is a ScanStatus enum value
- ScanStatusFindingsUnavailable = "FINDINGS_UNAVAILABLE"
-)
-
-// ScanStatus_Values returns all elements of the ScanStatus enum
-func ScanStatus_Values() []string {
- return []string{
- ScanStatusInProgress,
- ScanStatusComplete,
- ScanStatusFailed,
- ScanStatusUnsupportedImage,
- ScanStatusActive,
- ScanStatusPending,
- ScanStatusScanEligibilityExpired,
- ScanStatusFindingsUnavailable,
- }
-}
-
-const (
- // ScanTypeBasic is a ScanType enum value
- ScanTypeBasic = "BASIC"
-
- // ScanTypeEnhanced is a ScanType enum value
- ScanTypeEnhanced = "ENHANCED"
-)
-
-// ScanType_Values returns all elements of the ScanType enum
-func ScanType_Values() []string {
- return []string{
- ScanTypeBasic,
- ScanTypeEnhanced,
- }
-}
-
-const (
- // ScanningConfigurationFailureCodeRepositoryNotFound is a ScanningConfigurationFailureCode enum value
- ScanningConfigurationFailureCodeRepositoryNotFound = "REPOSITORY_NOT_FOUND"
-)
-
-// ScanningConfigurationFailureCode_Values returns all elements of the ScanningConfigurationFailureCode enum
-func ScanningConfigurationFailureCode_Values() []string {
- return []string{
- ScanningConfigurationFailureCodeRepositoryNotFound,
- }
-}
-
-const (
- // ScanningRepositoryFilterTypeWildcard is a ScanningRepositoryFilterType enum value
- ScanningRepositoryFilterTypeWildcard = "WILDCARD"
-)
-
-// ScanningRepositoryFilterType_Values returns all elements of the ScanningRepositoryFilterType enum
-func ScanningRepositoryFilterType_Values() []string {
- return []string{
- ScanningRepositoryFilterTypeWildcard,
- }
-}
-
-const (
- // TagStatusTagged is a TagStatus enum value
- TagStatusTagged = "TAGGED"
-
- // TagStatusUntagged is a TagStatus enum value
- TagStatusUntagged = "UNTAGGED"
-
- // TagStatusAny is a TagStatus enum value
- TagStatusAny = "ANY"
-)
-
-// TagStatus_Values returns all elements of the TagStatus enum
-func TagStatus_Values() []string {
- return []string{
- TagStatusTagged,
- TagStatusUntagged,
- TagStatusAny,
- }
-}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go
deleted file mode 100644
index 247c4b751f48..000000000000
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package ecr provides the client and types for making API
-// requests to Amazon EC2 Container Registry.
-//
-// Amazon Elastic Container Registry (Amazon ECR) is a managed container image
-// registry service. Customers can use the familiar Docker CLI, or their preferred
-// client, to push, pull, and manage images. Amazon ECR provides a secure, scalable,
-// and reliable registry for your Docker or Open Container Initiative (OCI)
-// images. Amazon ECR supports private repositories with resource-based permissions
-// using IAM so that specific users or Amazon EC2 instances can access repositories
-// and images.
-//
-// Amazon ECR has service endpoints in each supported Region. For more information,
-// see Amazon ECR endpoints (https://docs.aws.amazon.com/general/latest/gr/ecr.html)
-// in the Amazon Web Services General Reference.
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service.
-//
-// See ecr package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/
-//
-// # Using the Client
-//
-// To contact Amazon EC2 Container Registry with the SDK, use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the Amazon EC2 Container Registry client ECR for more
-// information on creating client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/#New
-package ecr
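The package documentation above describes creating a client with the New function; a minimal sketch of that flow, written against the upstream github.com/aws/aws-sdk-go module rather than the vendored copy removed by this change (region and output handling are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	// Clients are created from a session and are safe for concurrent use.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ecr.New(sess)

	// List the private repositories in the default registry.
	out, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{})
	if err != nil {
		log.Fatalf("DescribeRepositories failed: %v", err)
	}
	for _, repo := range out.Repositories {
		fmt.Println(aws.StringValue(repo.RepositoryName))
	}
}
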
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go
deleted file mode 100644
index a8392ade8f85..000000000000
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ecr
-
-import (
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
-
- // ErrCodeEmptyUploadException for service response error code
- // "EmptyUploadException".
- //
- // The specified layer upload does not contain any layer parts.
- ErrCodeEmptyUploadException = "EmptyUploadException"
-
- // ErrCodeImageAlreadyExistsException for service response error code
- // "ImageAlreadyExistsException".
- //
- // The specified image has already been pushed, and there were no changes to
- // the manifest or image tag after the last push.
- ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException"
-
- // ErrCodeImageDigestDoesNotMatchException for service response error code
- // "ImageDigestDoesNotMatchException".
- //
- // The specified image digest does not match the digest that Amazon ECR calculated
- // for the image.
- ErrCodeImageDigestDoesNotMatchException = "ImageDigestDoesNotMatchException"
-
- // ErrCodeImageNotFoundException for service response error code
- // "ImageNotFoundException".
- //
- // The image requested does not exist in the specified repository.
- ErrCodeImageNotFoundException = "ImageNotFoundException"
-
- // ErrCodeImageTagAlreadyExistsException for service response error code
- // "ImageTagAlreadyExistsException".
- //
- // The specified image is tagged with a tag that already exists. The repository
- // is configured for tag immutability.
- ErrCodeImageTagAlreadyExistsException = "ImageTagAlreadyExistsException"
-
- // ErrCodeInvalidLayerException for service response error code
- // "InvalidLayerException".
- //
- // The layer digest calculation performed by Amazon ECR upon receipt of the
- // image layer does not match the digest specified.
- ErrCodeInvalidLayerException = "InvalidLayerException"
-
- // ErrCodeInvalidLayerPartException for service response error code
- // "InvalidLayerPartException".
- //
- // The layer part size is not valid, or the first byte specified is not consecutive
- // to the last byte of a previous layer part upload.
- ErrCodeInvalidLayerPartException = "InvalidLayerPartException"
-
- // ErrCodeInvalidParameterException for service response error code
- // "InvalidParameterException".
- //
- // The specified parameter is invalid. Review the available parameters for the
- // API request.
- ErrCodeInvalidParameterException = "InvalidParameterException"
-
- // ErrCodeInvalidTagParameterException for service response error code
- // "InvalidTagParameterException".
- //
- // An invalid parameter has been specified. Tag keys can have a maximum character
- // length of 128 characters, and tag values can have a maximum length of 256
- // characters.
- ErrCodeInvalidTagParameterException = "InvalidTagParameterException"
-
- // ErrCodeKmsException for service response error code
- // "KmsException".
- //
- // The operation failed due to a KMS exception.
- ErrCodeKmsException = "KmsException"
-
- // ErrCodeLayerAlreadyExistsException for service response error code
- // "LayerAlreadyExistsException".
- //
- // The image layer already exists in the associated repository.
- ErrCodeLayerAlreadyExistsException = "LayerAlreadyExistsException"
-
- // ErrCodeLayerInaccessibleException for service response error code
- // "LayerInaccessibleException".
- //
- // The specified layer is not available because it is not associated with an
- // image. Unassociated image layers may be cleaned up at any time.
- ErrCodeLayerInaccessibleException = "LayerInaccessibleException"
-
- // ErrCodeLayerPartTooSmallException for service response error code
- // "LayerPartTooSmallException".
- //
- // Layer parts must be at least 5 MiB in size.
- ErrCodeLayerPartTooSmallException = "LayerPartTooSmallException"
-
- // ErrCodeLayersNotFoundException for service response error code
- // "LayersNotFoundException".
- //
- // The specified layers could not be found, or the specified layer is not valid
- // for this repository.
- ErrCodeLayersNotFoundException = "LayersNotFoundException"
-
- // ErrCodeLifecyclePolicyNotFoundException for service response error code
- // "LifecyclePolicyNotFoundException".
- //
- // The lifecycle policy could not be found, and no policy is set to the repository.
- ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException"
-
- // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code
- // "LifecyclePolicyPreviewInProgressException".
- //
- // The previous lifecycle policy preview request has not completed. Wait and
- // try again.
- ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException"
-
- // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code
- // "LifecyclePolicyPreviewNotFoundException".
- //
- // There is no dry run for this repository.
- ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException"
-
- // ErrCodeLimitExceededException for service response error code
- // "LimitExceededException".
- //
- // The operation did not succeed because it would have exceeded a service limit
- // for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
- // in the Amazon Elastic Container Registry User Guide.
- ErrCodeLimitExceededException = "LimitExceededException"
-
- // ErrCodePullThroughCacheRuleAlreadyExistsException for service response error code
- // "PullThroughCacheRuleAlreadyExistsException".
- //
- // A pull through cache rule with these settings already exists for the private
- // registry.
- ErrCodePullThroughCacheRuleAlreadyExistsException = "PullThroughCacheRuleAlreadyExistsException"
-
- // ErrCodePullThroughCacheRuleNotFoundException for service response error code
- // "PullThroughCacheRuleNotFoundException".
- //
- // The pull through cache rule was not found. Specify a valid pull through cache
- // rule and try again.
- ErrCodePullThroughCacheRuleNotFoundException = "PullThroughCacheRuleNotFoundException"
-
- // ErrCodeReferencedImagesNotFoundException for service response error code
- // "ReferencedImagesNotFoundException".
- //
- // The manifest list is referencing an image that does not exist.
- ErrCodeReferencedImagesNotFoundException = "ReferencedImagesNotFoundException"
-
- // ErrCodeRegistryPolicyNotFoundException for service response error code
- // "RegistryPolicyNotFoundException".
- //
- // The registry doesn't have an associated registry policy.
- ErrCodeRegistryPolicyNotFoundException = "RegistryPolicyNotFoundException"
-
- // ErrCodeRepositoryAlreadyExistsException for service response error code
- // "RepositoryAlreadyExistsException".
- //
- // The specified repository already exists in the specified registry.
- ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException"
-
- // ErrCodeRepositoryNotEmptyException for service response error code
- // "RepositoryNotEmptyException".
- //
- // The specified repository contains images. To delete a repository that contains
- // images, you must force the deletion with the force parameter.
- ErrCodeRepositoryNotEmptyException = "RepositoryNotEmptyException"
-
- // ErrCodeRepositoryNotFoundException for service response error code
- // "RepositoryNotFoundException".
- //
- // The specified repository could not be found. Check the spelling of the specified
- // repository and ensure that you are performing operations on the correct registry.
- ErrCodeRepositoryNotFoundException = "RepositoryNotFoundException"
-
- // ErrCodeRepositoryPolicyNotFoundException for service response error code
- // "RepositoryPolicyNotFoundException".
- //
- // The specified repository and registry combination does not have an associated
- // repository policy.
- ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException"
-
- // ErrCodeScanNotFoundException for service response error code
- // "ScanNotFoundException".
- //
- // The specified image scan could not be found. Ensure that image scanning is
- // enabled on the repository and try again.
- ErrCodeScanNotFoundException = "ScanNotFoundException"
-
- // ErrCodeServerException for service response error code
- // "ServerException".
- //
- // These errors are usually caused by a server-side issue.
- ErrCodeServerException = "ServerException"
-
- // ErrCodeTooManyTagsException for service response error code
- // "TooManyTagsException".
- //
- // The list of tags on the repository is over the limit. The maximum number
- // of tags that can be applied to a repository is 50.
- ErrCodeTooManyTagsException = "TooManyTagsException"
-
- // ErrCodeUnsupportedImageTypeException for service response error code
- // "UnsupportedImageTypeException".
- //
- // The image is of a type that cannot be scanned.
- ErrCodeUnsupportedImageTypeException = "UnsupportedImageTypeException"
-
- // ErrCodeUnsupportedUpstreamRegistryException for service response error code
- // "UnsupportedUpstreamRegistryException".
- //
- // The specified upstream registry isn't supported.
- ErrCodeUnsupportedUpstreamRegistryException = "UnsupportedUpstreamRegistryException"
-
- // ErrCodeUploadNotFoundException for service response error code
- // "UploadNotFoundException".
- //
- // The upload could not be found, or the specified upload ID is not valid for
- // this repository.
- ErrCodeUploadNotFoundException = "UploadNotFoundException"
-
- // ErrCodeValidationException for service response error code
- // "ValidationException".
- //
- // There was an exception validating this request.
- ErrCodeValidationException = "ValidationException"
-)
-
-var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "EmptyUploadException": newErrorEmptyUploadException,
- "ImageAlreadyExistsException": newErrorImageAlreadyExistsException,
- "ImageDigestDoesNotMatchException": newErrorImageDigestDoesNotMatchException,
- "ImageNotFoundException": newErrorImageNotFoundException,
- "ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException,
- "InvalidLayerException": newErrorInvalidLayerException,
- "InvalidLayerPartException": newErrorInvalidLayerPartException,
- "InvalidParameterException": newErrorInvalidParameterException,
- "InvalidTagParameterException": newErrorInvalidTagParameterException,
- "KmsException": newErrorKmsException,
- "LayerAlreadyExistsException": newErrorLayerAlreadyExistsException,
- "LayerInaccessibleException": newErrorLayerInaccessibleException,
- "LayerPartTooSmallException": newErrorLayerPartTooSmallException,
- "LayersNotFoundException": newErrorLayersNotFoundException,
- "LifecyclePolicyNotFoundException": newErrorLifecyclePolicyNotFoundException,
- "LifecyclePolicyPreviewInProgressException": newErrorLifecyclePolicyPreviewInProgressException,
- "LifecyclePolicyPreviewNotFoundException": newErrorLifecyclePolicyPreviewNotFoundException,
- "LimitExceededException": newErrorLimitExceededException,
- "PullThroughCacheRuleAlreadyExistsException": newErrorPullThroughCacheRuleAlreadyExistsException,
- "PullThroughCacheRuleNotFoundException": newErrorPullThroughCacheRuleNotFoundException,
- "ReferencedImagesNotFoundException": newErrorReferencedImagesNotFoundException,
- "RegistryPolicyNotFoundException": newErrorRegistryPolicyNotFoundException,
- "RepositoryAlreadyExistsException": newErrorRepositoryAlreadyExistsException,
- "RepositoryNotEmptyException": newErrorRepositoryNotEmptyException,
- "RepositoryNotFoundException": newErrorRepositoryNotFoundException,
- "RepositoryPolicyNotFoundException": newErrorRepositoryPolicyNotFoundException,
- "ScanNotFoundException": newErrorScanNotFoundException,
- "ServerException": newErrorServerException,
- "TooManyTagsException": newErrorTooManyTagsException,
- "UnsupportedImageTypeException": newErrorUnsupportedImageTypeException,
- "UnsupportedUpstreamRegistryException": newErrorUnsupportedUpstreamRegistryException,
- "UploadNotFoundException": newErrorUploadNotFoundException,
- "ValidationException": newErrorValidationException,
-}
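The deleted errors.go carries only the generated error-code string constants and the code-to-constructor map used by the JSON-RPC unmarshaler. A short sketch of how those constants are typically consumed by callers via awserr; the DescribeRepositories call and the RepositoryNames field are assumptions for illustration, not shown in this excerpt.

package ecrutil

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ecr"
)

// repositoryExists reports whether the named repository exists, treating the
// generated RepositoryNotFoundException code as a non-error "false" result.
func repositoryExists(svc *ecr.ECR, name string) (bool, error) {
	_, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
		RepositoryNames: []*string{&name},
	})
	if err == nil {
		return true, nil
	}
	// The constants above are plain strings; branch on awserr.Error.Code().
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryNotFoundException {
		return false, nil
	}
	return false, err
}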
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
deleted file mode 100644
index 24d3ab488ce3..000000000000
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ecr
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
-)
-
-// ECR provides the API operation methods for making requests to
-// Amazon EC2 Container Registry. See this package's package overview docs
-// for details on the service.
-//
-// ECR methods are safe to use concurrently. It is not safe to
-// modify mutate any of the struct's properties though.
-type ECR struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "ecr" // Name of service.
- EndpointsID = "api.ecr" // ID to lookup a service endpoint with.
- ServiceID = "ECR" // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the ECR client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-// mySession := session.Must(session.NewSession())
-//
-// // Create a ECR client from just a session.
-// svc := ecr.New(mySession)
-//
-// // Create a ECR client with additional configuration
-// svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR {
- c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = "ecr"
- }
- return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *ECR {
- svc := &ECR{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceID,
- SigningName: signingName,
- SigningRegion: signingRegion,
- PartitionID: partitionID,
- Endpoint: endpoint,
- APIVersion: "2015-09-21",
- ResolvedRegion: resolvedRegion,
- JSONVersion: "1.1",
- TargetPrefix: "AmazonEC2ContainerRegistry_V20150921",
- },
- handlers,
- ),
- }
-
- // Handlers
- svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
- svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
- svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
- svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
- svc.Handlers.UnmarshalError.PushBackNamed(
- protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
- )
-
- // Run custom client initialization if present
- if initClient != nil {
- initClient(svc.Client)
- }
-
- return svc
-}
-
-// newRequest creates a new request for a ECR operation and runs any
-// custom request initialization.
-func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request {
- req := c.NewRequest(op, params, data)
-
- // Run custom request initialization if present
- if initRequest != nil {
- initRequest(req)
- }
-
- return req
-}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go
deleted file mode 100644
index 4b6f88e4e151..000000000000
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/ecr/waiters.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ecr
-
-import (
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// WaitUntilImageScanComplete uses the Amazon ECR API operation
-// DescribeImageScanFindings to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window, an error will
-// be returned.
-func (c *ECR) WaitUntilImageScanComplete(input *DescribeImageScanFindingsInput) error {
- return c.WaitUntilImageScanCompleteWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilImageScanCompleteWithContext is an extended version of WaitUntilImageScanComplete.
-// With the support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) WaitUntilImageScanCompleteWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilImageScanComplete",
- MaxAttempts: 60,
- Delay: request.ConstantWaiterDelay(5 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.PathWaiterMatch, Argument: "imageScanStatus.status",
- Expected: "COMPLETE",
- },
- {
- State: request.FailureWaiterState,
- Matcher: request.PathWaiterMatch, Argument: "imageScanStatus.status",
- Expected: "FAILED",
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *DescribeImageScanFindingsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeImageScanFindingsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
-
-// WaitUntilLifecyclePolicyPreviewComplete uses the Amazon ECR API operation
-// GetLifecyclePolicyPreview to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window, an error will
-// be returned.
-func (c *ECR) WaitUntilLifecyclePolicyPreviewComplete(input *GetLifecyclePolicyPreviewInput) error {
- return c.WaitUntilLifecyclePolicyPreviewCompleteWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilLifecyclePolicyPreviewCompleteWithContext is an extended version of WaitUntilLifecyclePolicyPreviewComplete.
-// With the support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *ECR) WaitUntilLifecyclePolicyPreviewCompleteWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilLifecyclePolicyPreviewComplete",
- MaxAttempts: 20,
- Delay: request.ConstantWaiterDelay(5 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.PathWaiterMatch, Argument: "status",
- Expected: "COMPLETE",
- },
- {
- State: request.FailureWaiterState,
- Matcher: request.PathWaiterMatch, Argument: "status",
- Expected: "FAILED",
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *GetLifecyclePolicyPreviewInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
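The deleted waiters poll DescribeImageScanFindings and GetLifecyclePolicyPreview on a constant 5-second delay until the status reaches COMPLETE or FAILED. A sketch of how the image-scan waiter was used with a caller-supplied deadline via the context-aware variant; the input field names (RepositoryName, ImageId, ImageTag) are assumptions taken from the ECR API shapes rather than from this excerpt.

package ecrutil

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

// waitForImageScan blocks until the scan of my-repo:latest completes, fails,
// or the five-minute deadline expires.
func waitForImageScan(svc *ecr.ECR) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	input := &ecr.DescribeImageScanFindingsInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository
		ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
	}
	// The deleted waiter itself polls every 5s, for up to 60 attempts.
	return svc.WaitUntilImageScanCompleteWithContext(ctx, input)
}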
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go
index 74b09dec696c..643b4fb74c2e 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go
@@ -4840,7 +4840,7 @@ type CreateTargetGroupInput struct {
// The number of consecutive health check successes required before considering
// a target healthy. The range is 2-10. If the target group protocol is TCP,
// TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 5. For target groups with
- // a protocol of GENEVE, the default is 3. If the target type is lambda, the
+ // a protocol of GENEVE, the default is 5. If the target type is lambda, the
// default is 5.
HealthyThresholdCount *int64 `min:"2" type:"integer"`
@@ -4907,7 +4907,7 @@ type CreateTargetGroupInput struct {
// The number of consecutive health check failures required before considering
// a target unhealthy. The range is 2-10. If the target group protocol is TCP,
// TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2. For target groups with
- // a protocol of GENEVE, the default is 3. If the target type is lambda, the
+ // a protocol of GENEVE, the default is 2. If the target type is lambda, the
// default is 5.
UnhealthyThresholdCount *int64 `min:"2" type:"integer"`
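The two elbv2 hunks are documentation-only: for GENEVE target groups the documented default HealthyThresholdCount moves from 3 to 5 and the default UnhealthyThresholdCount from 3 to 2. Code that depends on particular thresholds can pin them explicitly instead of relying on the protocol-specific defaults; a hedged sketch in which the target-group name, VPC ID, the ProtocolEnumGeneve constant, and port 6081 are assumptions for illustration.

package elbv2util

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

// createGeneveTargetGroup creates a GENEVE target group with explicit
// health-check thresholds rather than the documented defaults.
func createGeneveTargetGroup(svc *elbv2.ELBV2) (*elbv2.CreateTargetGroupOutput, error) {
	return svc.CreateTargetGroup(&elbv2.CreateTargetGroupInput{
		Name:     aws.String("geneve-appliances"),     // hypothetical name
		Protocol: aws.String(elbv2.ProtocolEnumGeneve),
		Port:     aws.Int64(6081),                     // conventional GENEVE port
		VpcId:    aws.String("vpc-0123456789abcdef0"), // hypothetical VPC
		// Pin both thresholds instead of relying on the defaults above.
		HealthyThresholdCount:   aws.Int64(3),
		UnhealthyThresholdCount: aws.Int64(3),
	})
}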
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
index 2ac8aa1be1d5..8f594ac0d8ee 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
@@ -93,8 +93,8 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -104,10 +104,18 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion
func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) {
req, out := c.CancelKeyDeletionRequest(input)
@@ -175,32 +183,26 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// ConnectCustomKeyStore API operation for AWS Key Management Service.
//
// Connects or reconnects a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// to its associated CloudHSM cluster.
+// to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore
+// connects the key store to its associated CloudHSM cluster. For an external
+// key store, ConnectCustomKeyStore connects the key store to the external key
+// store proxy that communicates with your external key manager.
//
// The custom key store must be connected before you can create KMS keys in
// the key store or use the KMS keys it contains. You can disconnect and reconnect
// a custom key store at any time.
//
-// To connect a custom key store, its associated CloudHSM cluster must have
-// at least one active HSM. To get the number of active HSMs in a cluster, use
-// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
-// operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
-// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
-// (CU) must not be logged into the cluster. This prevents KMS from using this
-// account to log in.
-//
-// The connection process can take an extended amount of time to complete; up
-// to 20 minutes. This operation starts the connection process, but it does
-// not wait for it to complete. When it succeeds, this operation quickly returns
-// an HTTP 200 response and a JSON object with no properties. However, this
-// response does not indicate that the custom key store is connected. To get
-// the connection state of the custom key store, use the DescribeCustomKeyStores
+// The connection process for a custom key store can take an extended amount
+// of time to complete. This operation starts the connection process, but it
+// does not wait for it to complete. When it succeeds, this operation quickly
+// returns an HTTP 200 response and a JSON object with no properties. However,
+// this response does not indicate that the custom key store is connected. To
+// get the connection state of the custom key store, use the DescribeCustomKeyStores
// operation.
//
-// During the connection process, KMS finds the CloudHSM cluster that is associated
-// with the custom key store, creates the connection infrastructure, connects
-// to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates
-// its password.
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
//
// The ConnectCustomKeyStore operation might fail for various reasons. To find
// the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode
@@ -210,8 +212,44 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// the custom key store, correct the error, use the UpdateCustomKeyStore operation
// if necessary, and then use ConnectCustomKeyStore again.
//
-// If you are having trouble connecting or disconnecting a custom key store,
-// see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// # CloudHSM key store
+//
+// During the connection process for an CloudHSM key store, KMS finds the CloudHSM
+// cluster that is associated with the custom key store, creates the connection
+// infrastructure, connects to the cluster, logs into the CloudHSM client as
+// the kmsuser CU, and rotates its password.
+//
+// To connect an CloudHSM key store, its associated CloudHSM cluster must have
+// at least one active HSM. To get the number of active HSMs in a cluster, use
+// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
+// (CU) must not be logged into the cluster. This prevents KMS from using this
+// account to log in.
+//
+// If you are having trouble connecting or disconnecting a CloudHSM key store,
+// see Troubleshooting an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// in the Key Management Service Developer Guide.
+//
+// # External key store
+//
+// When you connect an external key store that uses public endpoint connectivity,
+// KMS tests its ability to communicate with your external key manager by sending
+// a request via the external key store proxy.
+//
+// When you connect to an external key store that uses VPC endpoint service
+// connectivity, KMS establishes the networking elements that it needs to communicate
+// with your external key manager via the external key store proxy. This includes
+// creating an interface endpoint to the VPC endpoint service and a private
+// hosted zone for traffic between KMS and the VPC endpoint service.
+//
+// To connect an external key store, KMS must be able to connect to the external
+// key store proxy, the external key store proxy must be able to communicate
+// with your external key manager, and the external key manager must be available
+// for cryptographic operations.
+//
+// If you are having trouble connecting or disconnecting an external key store,
+// see Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html)
// in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
@@ -242,10 +280,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// Returned Error Types:
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - CustomKeyStoreInvalidStateException
@@ -255,17 +292,27 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+//     not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -277,29 +324,29 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -375,7 +422,7 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// Creates a friendly name for a KMS key.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// You can use an alias to identify a KMS key in the KMS console, in the DescribeKey
@@ -433,8 +480,8 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - AlreadyExistsException
// The request was rejected because it attempted to create a resource that already
@@ -460,10 +507,18 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias
func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) {
req, out := c.CreateAliasRequest(input)
@@ -530,27 +585,65 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
// CreateCustomKeyStore API operation for AWS Key Management Service.
//
// Creates a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// that is associated with an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html)
-// that you own and manage.
-//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// backed by a key store that you own and manage. When you use a KMS key in
+// a custom key store for a cryptographic operation, the cryptographic operation
+// is actually performed in your key store using your keys. KMS supports CloudHSM
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// backed by an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html)
+// and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// backed by an external key store proxy and external key manager outside of
+// Amazon Web Services.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
-// Before you create the custom key store, you must assemble the required elements,
-// including an CloudHSM cluster that fulfills the requirements for a custom
-// key store. For details about the required elements, see Assemble the Prerequisites
-// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// Before you create the custom key store, the required elements must be in
+// place and operational. We recommend that you use the test tools that KMS
+// provides to verify the configuration of your external key store proxy. For details
+// about the required elements and verification tests, see Assemble the prerequisites
+// (for CloudHSM key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// or Assemble the prerequisites (for external key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements)
// in the Key Management Service Developer Guide.
//
+// To create a custom key store, use the following parameters.
+//
+// - To create an CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId,
+// KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter
+// is optional for CloudHSM key stores. If you include it, set it to the
+// default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting
+// an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// in the Key Management Service Developer Guide.
+//
+// - To create an external key store, specify the CustomKeyStoreName and
+// a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity,
+// XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath.
+// If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the
+// XksProxyVpcEndpointServiceName parameter. For help with failures, see
+// Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html)
+// in the Key Management Service Developer Guide.
+//
+// For external key stores:
+//
+// Some external key managers provide a simpler method for creating an external
+// key store. For details, see your external key manager documentation.
+//
+// When creating an external key store in the KMS console, you can upload a
+// JSON-based proxy configuration file with the desired values. You cannot use
+// a proxy configuration with the CreateCustomKeyStore operation. However, you
+// can use the values in the file to help you determine the correct values for
+// the CreateCustomKeyStore parameters.
+//
// When the operation completes successfully, it returns the ID of the new custom
// key store. Before you can use your new custom key store, you need to use
-// the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM
-// cluster. Even if you are not going to use your custom key store immediately,
-// you might want to connect it to verify that all settings are correct and
-// then disconnect it until you are ready to use it.
-//
-// For help with failures, see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// the ConnectCustomKeyStore operation to connect a new CloudHSM key store to
+// its CloudHSM cluster, or to connect a new external key store to the external
+// key store proxy for your external key manager. Even if you are not going
+// to use your custom key store immediately, you might want to connect it to
+// verify that all settings are correct and then disconnect it until you are
+// ready to use it.
+//
+// For help with failures, see Troubleshooting a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
// in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
@@ -582,12 +675,13 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
//
// - CloudHsmClusterInUseException
// The request was rejected because the specified CloudHSM cluster is already
-// associated with a custom key store or it shares a backup history with a cluster
-// that is associated with a custom key store. Each custom key store must be
-// associated with a different CloudHSM cluster.
+// associated with an CloudHSM key store in the account, or it shares a backup
+// history with an CloudHSM key store in the account. Each CloudHSM key store
+// in the account must be associated with a different CloudHSM cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
//
// - CustomKeyStoreNameInUseException
@@ -604,51 +698,113 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
// can be retried.
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - IncorrectTrustAnchorException
// The request was rejected because the trust anchor certificate in the request
-// is not the trust anchor certificate for the specified CloudHSM cluster.
+// to create an CloudHSM key store is not the trust anchor certificate for the
+// specified CloudHSM cluster.
//
-// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+// When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - LimitExceededException
+// The request was rejected because a quota was exceeded. For more information,
+// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
+// in the Key Management Service Developer Guide.
+//
+// - XksProxyUriInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with an external key store in the
+// Amazon Web Services account and Region. Each external key store in an account
+// and Region must use a unique external key store proxy API address.
+//
+// - XksProxyUriEndpointInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// is already associated with an external key store in the Amazon Web Services
+// account and Region. Each external key store in an account and Region must
+// use a unique external key store proxy address.
+//
+// - XksProxyUriUnreachableException
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+//
+// - XksProxyIncorrectAuthenticationCredentialException
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+//
+// - XksProxyVpcEndpointServiceInUseException
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with an external key store in the Amazon Web Services
+// account and Region. Each external key store in an Amazon Web Services account
+// and Region must use a different Amazon VPC endpoint service.
+//
+// - XksProxyVpcEndpointServiceNotFoundException
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+//
+// - XksProxyVpcEndpointServiceInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+//
+// - XksProxyInvalidResponseException
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+//
+// - XksProxyInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore
func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) {
req, out := c.CreateCustomKeyStoreRequest(input)
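Mapping the CreateCustomKeyStore documentation above into code: for a CloudHSM-backed store the listed parameters correspond directly to CreateCustomKeyStoreInput fields, and the returned store ID then feeds ConnectCustomKeyStore as the surrounding docs describe. A hedged sketch; every identifier value is a placeholder and the trust anchor is the customerCA.crt contents mentioned earlier.

package kmsutil

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// createAndConnectCloudHSMKeyStore creates a CloudHSM-backed custom key store
// and immediately starts connecting it, returning the new store's ID.
func createAndConnectCloudHSMKeyStore(svc *kms.KMS, trustAnchorPEM string) (*string, error) {
	out, err := svc.CreateCustomKeyStore(&kms.CreateCustomKeyStoreInput{
		CustomKeyStoreName:     aws.String("example-cloudhsm-store"),
		CustomKeyStoreType:     aws.String("AWS_CLOUDHSM"),        // optional; the default per the docs above
		CloudHsmClusterId:      aws.String("cluster-1a23b4cdefg"), // placeholder cluster ID
		KeyStorePassword:       aws.String("kmsuser-password"),    // placeholder kmsuser password
		TrustAnchorCertificate: aws.String(trustAnchorPEM),        // contents of customerCA.crt
	})
	if err != nil {
		return nil, err
	}
	// The new store starts disconnected; connect it (now or later) before
	// creating keys in it, as described in the ConnectCustomKeyStore docs.
	_, err = svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: out.CustomKeyStoreId,
	})
	return out.CustomKeyStoreId, err
}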
@@ -783,8 +939,8 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request,
// The request was rejected because the specified KMS key is not enabled.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -806,10 +962,18 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant
func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) {
req, out := c.CreateGrantRequest(input)
@@ -876,13 +1040,21 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// CreateKey API operation for AWS Key Management Service.
//
// Creates a unique customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys)
-// in your Amazon Web Services account and Region.
+// in your Amazon Web Services account and Region. You can use a KMS key in
+// cryptographic operations, such as encryption and signing. Some Amazon Web
+// Services services let you use KMS keys that you create and manage to protect
+// your service resources.
//
-// In addition to the required parameters, you can use the optional parameters
-// to specify a key policy, description, tags, and other useful elements for
-// any key type.
+// A KMS key is a logical representation of a cryptographic key. In addition
+// to the key material used in cryptographic operations, a KMS key includes
+// metadata, such as the key ID, key policy, creation date, description, and
+// key state. For details, see Managing keys (https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html)
+// in the Key Management Service Developer Guide
//
-// KMS is replacing the term customer master key (CMK) with KMS key and KMS
+// Use the parameters of CreateKey to specify the type of KMS key, the source
+// of its key material, its key policy, description, tags, and other properties.
+//
+// KMS has replaced the term customer master key (CMK) with KMS key and KMS
// key. The concept has not changed. To prevent breaking changes, KMS is keeping
// some variations of this term.
//
@@ -890,11 +1062,14 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
//
// # Symmetric encryption KMS key
//
-// To create a symmetric encryption KMS key, you aren't required to specify
-// any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the
-// default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption
-// KMS key. For technical details, see SYMMETRIC_DEFAULT key spec (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-symmetric-default)
-// in the Key Management Service Developer Guide.
+// By default, CreateKey creates a symmetric encryption KMS key with key material
+// that KMS generates. This is the basic and most widely used type of KMS key,
+// and provides the best performance.
+//
+// To create a symmetric encryption KMS key, you don't need to specify any parameters.
+// The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage,
+// ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric
+// encryption KMS key with KMS key material.
//
// If you need a key for basic encryption and decryption or you are creating
// a KMS key to protect your resources in an Amazon Web Services service, create
@@ -965,12 +1140,13 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
// in the Key Management Service Developer Guide.
//
-// To import your own key material, begin by creating a symmetric encryption
-// KMS key with no key material. To do this, use the Origin parameter of CreateKey
-// with a value of EXTERNAL. Next, use GetParametersForImport operation to get
-// a public key and import token, and use the public key to encrypt your key
-// material. Then, use ImportKeyMaterial with your import token to import the
-// key material. For step-by-step instructions, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+// To import your own key material into a KMS key, begin by creating a symmetric
+// encryption KMS key with no key material. To do this, use the Origin parameter
+// of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation
+// to get a public key and import token, and use the public key to encrypt your
+// key material. Then, use ImportKeyMaterial with your import token to import
+// the key material. For step-by-step instructions, see Importing Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
// in the Key Management Service Developer Guide .
//
// This feature supports only symmetric encryption KMS keys, including multi-Region
@@ -980,22 +1156,52 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// To create a multi-Region primary key with imported key material, use the
// Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion
// parameter with a value of True. To create replicas of the multi-Region primary
-// key, use the ReplicateKey operation. For more information about multi-Region
-// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// key, use the ReplicateKey operation. For instructions, see Importing key
+// material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html).
+// For more information about multi-Region keys, see Multi-Region keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
// in the Key Management Service Developer Guide.
//
// # Custom key store
//
-// To create a symmetric encryption KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// use the CustomKeyStoreId parameter to specify the custom key store. You must
-// also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM
-// cluster that is associated with the custom key store must have at least two
-// active HSMs in different Availability Zones in the Amazon Web Services Region.
+// A custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// lets you protect your Amazon Web Services resources using keys in a backing
+// key store that you own and manage. When you request a cryptographic operation
+// with a KMS key in a custom key store, the operation is performed in the backing
+// key store using its cryptographic keys.
+//
+// KMS supports CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// backed by an CloudHSM cluster and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// backed by an external key manager outside of Amazon Web Services. When you
+// create a KMS key in an CloudHSM key store, KMS generates an encryption key
+// in the CloudHSM cluster and associates it with the KMS key. When you create
+// a KMS key in an external key store, you specify an existing encryption key
+// in the external key manager.
+//
+// Some external key managers provide a simpler method for creating a KMS key
+// in an external key store. For details, see your external key manager documentation.
+//
+// Before you create a KMS key in a custom key store, the ConnectionState of
+// the key store must be CONNECTED. To connect the custom key store, use the
+// ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores
+// operation.
//
-// Custom key stores support only symmetric encryption KMS keys. You cannot
-// create an HMAC KMS key or an asymmetric KMS key in a custom key store. For
-// information about custom key stores in KMS see Custom key stores in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// in the Key Management Service Developer Guide .
+// To create a KMS key in a custom key store, use the CustomKeyStoreId. Use
+// the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value,
+// ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is
+// supported in a custom key store.
+//
+// To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html),
+// use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster
+// that is associated with the custom key store must have at least two active
+// HSMs in different Availability Zones in the Amazon Web Services Region.
+//
+// To create a KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html),
+// use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId
+// parameter that identifies an existing external key.
+//
+// Some external key managers provide a simpler method for creating a KMS key
+// in an external key store. For details, see your external key manager documentation.
//
// Cross-account use: No. You cannot use this operation to create a KMS key
// in a different Amazon Web Services account.
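
A sketch of the external key store case described above, assuming the standard upstream aws-sdk-go v1 import paths; the custom key store ID and external key ID are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    // Symmetric encryption key in an external key store: keep the default
    // KeySpec and KeyUsage, set Origin to EXTERNAL_KEY_STORE, and name an
    // existing key in the external key manager via XksKeyId.
    out, err := svc.CreateKey(&kms.CreateKeyInput{
        CustomKeyStoreId: aws.String("cks-1234567890abcdef0"),
        Origin:           aws.String("EXTERNAL_KEY_STORE"),
        XksKeyId:         aws.String("bb8562717f809024"),
    })
    if err != nil {
        log.Fatal(err) // e.g. XksKeyNotFoundException or a connection-state error
    }
    fmt.Println("key ARN:", aws.StringValue(out.KeyMetadata.Arn))
}
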
@@ -1028,8 +1234,8 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// or semantically correct.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -1062,49 +1268,83 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+//     not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - XksKeyInvalidConfigurationException
+// The request was rejected because the external key specified by the XksKeyId
+// parameter did not meet the configuration requirements for an external key
+// store.
+//
+// The external key must be an AES-256 symmetric key that is enabled and performs
+// encryption and decryption.
+//
+// - XksKeyAlreadyInUseException
+// The request was rejected because the (XksKeyId) is already associated with
+// a KMS key in this external key store. Each KMS key in an external key store
+// must be associated with a different external key.
+//
+// - XksKeyNotFoundException
+// The request was rejected because the external key store proxy could not find
+// the external key. This exception is thrown when the value of the XksKeyId
+// parameter doesn't identify a key in the external key manager associated with
+// the external key proxy.
+//
+// Verify that the XksKeyId represents an existing key in the external key manager.
+// Use the key identifier that the external key store proxy uses to identify
+// the key. For details, see the documentation provided with your external key
+// store proxy or key manager.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey
func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) {
req, out := c.CreateKeyRequest(input)
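
As the documentation above notes, a symmetric encryption KMS key needs no parameters at all; a minimal sketch using the standard upstream aws-sdk-go v1 import paths, with only an optional description:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    // With no KeySpec, KeyUsage, or Origin, the defaults (SYMMETRIC_DEFAULT,
    // ENCRYPT_DECRYPT, AWS_KMS) produce a symmetric encryption KMS key.
    out, err := svc.CreateKey(&kms.CreateKeyInput{
        Description: aws.String("example symmetric encryption key"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("key id:", aws.StringValue(out.KeyMetadata.KeyId))
}
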
@@ -1192,8 +1432,8 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
//
// The Decrypt operation also decrypts ciphertext that was encrypted outside
// of KMS by the public key in an KMS asymmetric KMS key. However, it cannot
-// decrypt ciphertext produced by other libraries, such as the Amazon Web Services
-// Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
+// decrypt symmetric ciphertext produced by other libraries, such as the Amazon
+// Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
// These libraries return a ciphertext format that is incompatible with KMS.
//
@@ -1209,11 +1449,11 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// KMS key that you intend.
//
// Whenever possible, use key policies to give users permission to call the
-// Decrypt operation on a particular KMS key, instead of using IAM policies.
-// Otherwise, you might create an IAM user policy that gives the user Decrypt
-// permission on all KMS keys. This user could decrypt ciphertext that was encrypted
-// by KMS keys in other accounts if the key policy for the cross-account KMS
-// key permits it. If you must use an IAM policy for Decrypt permissions, limit
+// Decrypt operation on a particular KMS key, instead of using IAM policies.
+// Otherwise, you might create an IAM policy that gives the user Decrypt permission
+// on all KMS keys. This user could decrypt ciphertext that was encrypted by
+// KMS keys in other accounts if the key policy for the cross-account KMS key
+// permits it. If you must use an IAM policy for Decrypt permissions, limit
// the user to particular KMS keys or particular trusted accounts. For details,
// see Best practices for IAM policies (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices)
// in the Key Management Service Developer Guide.
@@ -1228,9 +1468,9 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
//
-// Cross-account use: Yes. To perform this operation with a KMS key in a different
-// Amazon Web Services account, specify the key ARN or alias ARN in the value
-// of the KeyId parameter.
+// Cross-account use: Yes. If you use the KeyId parameter to identify a KMS
+// key in a different Amazon Web Services account, specify the key ARN or the
+// alias ARN of the KMS key.
//
// Required permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy)
@@ -1297,8 +1537,8 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// key, use the DescribeKey operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidGrantTokenException
// The request was rejected because the specified grant token is not valid.
@@ -1311,10 +1551,18 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt
func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) {
req, out := c.DecryptRequest(input)
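
A sketch of a Decrypt call that pins the KMS key with KeyId, as the documentation above recommends (standard upstream aws-sdk-go v1 import paths; the key ARN and ciphertext are placeholders):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    ciphertext := []byte("...ciphertext produced earlier by Encrypt or GenerateDataKey...")

    // KeyId is optional for symmetric ciphertext, but supplying it ensures the
    // request fails if the ciphertext belongs to a different KMS key.
    out, err := svc.Decrypt(&kms.DecryptInput{
        CiphertextBlob: ciphertext,
        KeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("decrypted %d bytes with %s\n", len(out.Plaintext), aws.StringValue(out.KeyId))
}
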
@@ -1384,7 +1632,7 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// Deletes the specified alias.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// Because an alias is not a property of a KMS key, you can delete and change
@@ -1428,8 +1676,8 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -1443,10 +1691,18 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias
func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) {
req, out := c.DeleteAliasRequest(input)
@@ -1514,33 +1770,39 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req
// DeleteCustomKeyStore API operation for AWS Key Management Service.
//
// Deletes a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// This operation does not delete the CloudHSM cluster that is associated with
-// the custom key store, or affect any users or keys in the cluster.
+// This operation does not affect any backing elements of the custom key store.
+// It does not delete the CloudHSM cluster that is associated with an CloudHSM
+// key store, or affect any users or keys in the cluster. For an external key
+// store, it does not affect the external key store proxy, external key manager,
+// or any external keys.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
//
// The custom key store that you delete cannot contain any KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys).
// Before deleting the key store, verify that you will never need to use any
// of the KMS keys in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations).
// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store.
-// When the scheduled waiting period expires, the ScheduleKeyDeletion operation
-// deletes the KMS keys. Then it makes a best effort to delete the key material
-// from the associated cluster. However, you might need to manually delete the
-// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups.
-//
-// After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to
-// disconnect the key store from KMS. Then, you can delete the custom key store.
-//
-// Instead of deleting the custom key store, consider using DisconnectCustomKeyStore
-// to disconnect it from KMS. While the key store is disconnected, you cannot
-// create or use the KMS keys in the key store. But, you do not need to delete
-// KMS keys and you can reconnect a disconnected custom key store at any time.
+// After the required waiting period expires and all KMS keys are deleted from
+// the custom key store, use DisconnectCustomKeyStore to disconnect the key
+// store from KMS. Then, you can delete the custom key store.
+//
+// For keys in an CloudHSM key store, the ScheduleKeyDeletion operation makes
+// a best effort to delete the key material from the associated cluster. However,
+// you might need to manually delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. KMS never creates, manages, or deletes
+// cryptographic keys in the external key manager associated with an external
+// key store. You must manage them using your external key manager tools.
+//
+// Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore
+// operation to disconnect the custom key store from its backing key store.
+// While the key store is disconnected, you cannot create or use the KMS keys
+// in the key store. But, you do not need to delete KMS keys and you can reconnect
+// a disconnected custom key store at any time.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
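
The sequence described above (empty the key store, disconnect it, then delete it) might look like the following sketch, with a placeholder custom key store ID and the standard upstream aws-sdk-go v1 import paths:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))
    storeID := aws.String("cks-1234567890abcdef0")

    // DeleteCustomKeyStore requires ConnectionState DISCONNECTED, so disconnect first.
    if _, err := svc.DisconnectCustomKeyStore(&kms.DisconnectCustomKeyStoreInput{
        CustomKeyStoreId: storeID,
    }); err != nil {
        log.Fatal(err)
    }

    // This call is rejected while the store still contains KMS keys.
    if _, err := svc.DeleteCustomKeyStore(&kms.DeleteCustomKeyStoreInput{
        CustomKeyStoreId: storeID,
    }); err != nil {
        log.Fatal(err)
    }
    log.Println("custom key store deleted")
}
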
@@ -1581,17 +1843,27 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+//     not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -1713,8 +1985,8 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -1728,10 +2000,18 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial
func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) {
req, out := c.DeleteImportedKeyMaterialRequest(input)
@@ -1788,7 +2068,7 @@ func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
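
With TruncationToken now empty, the generated paginator appears to decide whether another page exists from the NextMarker output token alone; either way, callers can rely on the generated Pages helper. A minimal sketch, assuming the standard upstream aws-sdk-go v1 import paths:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    // List every custom key store in the account and Region, ten per page.
    err := svc.DescribeCustomKeyStoresPages(
        &kms.DescribeCustomKeyStoresInput{Limit: aws.Int64(10)},
        func(page *kms.DescribeCustomKeyStoresOutput, lastPage bool) bool {
            for _, store := range page.CustomKeyStores {
                fmt.Printf("%s: %s\n",
                    aws.StringValue(store.CustomKeyStoreName),
                    aws.StringValue(store.ConnectionState))
            }
            return true // keep paging
        })
    if err != nil {
        log.Fatal(err)
    }
}
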
@@ -1806,30 +2086,37 @@ func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput
// Gets information about custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// in the account and Region.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
+// of KMS with the isolation and control of a key store that you own and manage.
//
// By default, this operation returns information about all custom key stores
// in the account and Region. To get only information about a particular custom
// key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter
// (but not both).
//
-// To determine whether the custom key store is connected to its CloudHSM cluster,
-// use the ConnectionState element in the response. If an attempt to connect
-// the custom key store failed, the ConnectionState value is FAILED and the
-// ConnectionErrorCode element in the response indicates the cause of the failure.
-// For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.
+// To determine whether the custom key store is connected to its CloudHSM cluster
+// or external key store proxy, use the ConnectionState element in the response.
+// If an attempt to connect the custom key store failed, the ConnectionState
+// value is FAILED and the ConnectionErrorCode element in the response indicates
+// the cause of the failure. For help interpreting the ConnectionErrorCode,
+// see CustomKeyStoresListEntry.
//
// Custom key stores have a DISCONNECTED connection state if the key store has
-// never been connected or you use the DisconnectCustomKeyStore operation to
-// disconnect it. If your custom key store state is CONNECTED but you are having
-// trouble using it, make sure that its associated CloudHSM cluster is active
-// and contains the minimum number of HSMs required for the operation, if any.
-//
-// For help repairing your custom key store, see the Troubleshooting Custom
-// Key Stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
-// topic in the Key Management Service Developer Guide.
+// never been connected or you used the DisconnectCustomKeyStore operation to
+// disconnect it. Otherwise, the connection state is CONNECTED. If your custom
+// key store connection state is CONNECTED but you are having trouble using
+// it, verify that the backing store is active and available. For an CloudHSM
+// key store, verify that the associated CloudHSM cluster is active and contains
+// the minimum number of HSMs required for the operation, if any. For an external
+// key store, verify that the external key store proxy and its associated external
+// key manager are reachable and enabled.
+//
+// For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html).
+// For help repairing your external key store, see the Troubleshooting external
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html).
+// Both topics are in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
@@ -1995,10 +2282,14 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request,
// any) of the key material. It includes fields, like KeySpec, that help you
// distinguish different types of KMS keys. It also displays the key usage (encryption,
// signing, or generating and verifying MACs) and the algorithms that the KMS
-// key supports. For KMS keys in custom key stores, it includes information
-// about the custom key store, such as the key store ID and the CloudHSM cluster
-// ID. For multi-Region keys, it displays the primary key and all related replica
-// keys.
+// key supports.
+//
+// For multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html),
+// DescribeKey displays the primary key and all related replica keys. For KMS
+// keys in CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html),
+// it includes information about the key store, such as the key store ID and
+// the CloudHSM cluster ID. For KMS keys in external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html),
+// it includes the custom key store ID and the ID of the external key.
//
// DescribeKey does not return the following information:
//
@@ -2061,8 +2352,8 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request,
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2171,8 +2462,8 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2182,10 +2473,18 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey
func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) {
req, out := c.DisableKeyRequest(input)
@@ -2256,12 +2555,12 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// of the specified symmetric encryption KMS key.
//
// Automatic key rotation is supported only on symmetric encryption KMS keys.
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// You can enable (EnableKeyRotation) and disable automatic rotation of the
@@ -2311,8 +2610,8 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2322,10 +2621,18 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -2397,10 +2704,18 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
// DisconnectCustomKeyStore API operation for AWS Key Management Service.
//
// Disconnects the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// from its associated CloudHSM cluster. While a custom key store is disconnected,
-// you can manage the custom key store and its KMS keys, but you cannot create
-// or use KMS keys in the custom key store. You can reconnect the custom key
-// store at any time.
+// from its backing key store. This operation disconnects an CloudHSM key store
+// from its associated CloudHSM cluster or disconnects an external key store
+// from the external key store proxy that communicates with your external key
+// manager.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// While a custom key store is disconnected, you can manage the custom key store
+// and its KMS keys, but you cannot create or use its KMS keys. You can reconnect
+// the custom key store at any time.
//
// While a custom key store is disconnected, all attempts to create KMS keys
// in the custom key store or to use existing KMS keys in cryptographic operations
@@ -2408,16 +2723,13 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
// will fail. This action can prevent users from storing and accessing sensitive
// data.
//
+// When you disconnect a custom key store, its ConnectionState changes to Disconnected.
// To find the connection state of a custom key store, use the DescribeCustomKeyStores
// operation. To reconnect a custom key store, use the ConnectCustomKeyStore
// operation.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
@@ -2452,17 +2764,27 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+//     not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -2571,8 +2893,8 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2587,10 +2909,18 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey
func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) {
req, out := c.EnableKeyRequest(input)
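
EnableKey and its counterpart DisableKey take only a key identifier; a sketch with a placeholder key ID and the standard upstream aws-sdk-go v1 import paths:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))
    keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab")

    // Disable the key; cryptographic operations on it are rejected while disabled.
    if _, err := svc.DisableKey(&kms.DisableKeyInput{KeyId: keyID}); err != nil {
        log.Fatal(err)
    }

    // Re-enable it so it can be used in cryptographic operations again.
    if _, err := svc.EnableKey(&kms.EnableKeyInput{KeyId: keyID}); err != nil {
        log.Fatal(err)
    }
    log.Println("key re-enabled")
}
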
@@ -2669,12 +2999,12 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
//
// Automatic key rotation is supported only on symmetric encryption KMS keys
// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// You cannot enable or disable automatic rotation Amazon Web Services managed
@@ -2730,8 +3060,8 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2741,10 +3071,18 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -2899,8 +3237,8 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -2930,10 +3268,18 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt
func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) {
req, out := c.EncryptRequest(input)
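
A minimal Encrypt call for a symmetric encryption KMS key (standard upstream aws-sdk-go v1 import paths; the alias is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    // Encrypt a short secret (at most 4096 bytes for a symmetric encryption key).
    out, err := svc.Encrypt(&kms.EncryptInput{
        KeyId:     aws.String("alias/example-key"),
        Plaintext: []byte("database password"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("ciphertext is %d bytes\n", len(out.CiphertextBlob))
}
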
@@ -3014,9 +3360,9 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data
// keys, use the KeySpec parameter.
//
-// To generate an SM4 data key (China Regions only), specify a KeySpec value
-// of AES_128 or NumberOfBytes value of 128. The symmetric encryption key used
-// in China Regions to encrypt your data key is an SM4 encryption key.
+// To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec
+// value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption
+// key used in China Regions to encrypt your data key is an SM4 encryption key.
//
// To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext.
// To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext
@@ -3106,8 +3452,8 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3137,10 +3483,18 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey
func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) {
req, out := c.GenerateDataKeyRequest(input)
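
The envelope-encryption flow the documentation above describes, sketched with a placeholder key alias and the standard upstream aws-sdk-go v1 import paths: use the plaintext data key locally, store only the encrypted copy, and call Decrypt later to recover it.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kms"
)

func main() {
    svc := kms.New(session.Must(session.NewSession()))

    // Request a 256-bit data key under a symmetric encryption KMS key.
    out, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
        KeyId:   aws.String("alias/example-key"),
        KeySpec: aws.String("AES_256"),
    })
    if err != nil {
        log.Fatal(err)
    }

    // out.Plaintext is the local encryption key: use it, then discard it from memory.
    // out.CiphertextBlob is the copy to persist next to the encrypted data.
    fmt.Printf("plaintext key: %d bytes, encrypted key: %d bytes\n",
        len(out.Plaintext), len(out.CiphertextBlob))

    // Later, recover the data key from the stored ciphertext.
    dec, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: out.CiphertextBlob})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("recovered key: %d bytes\n", len(dec.Plaintext))
}
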
@@ -3296,8 +3650,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3327,10 +3681,18 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -3479,8 +3841,8 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3510,10 +3872,18 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -3612,6 +3982,14 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// or a key in a custom key store to generate a data key. To get the type of
// your KMS key, use the DescribeKey operation.
//
+// You must also specify the length of the data key. Use either the KeySpec
+// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data
+// keys, use the KeySpec parameter.
+//
+// To generate an SM4 data key (China Regions only), specify a KeySpec value
+// of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used
+// in China Regions to encrypt your data key is an SM4 encryption key.
+//
// If the operation succeeds, you will find the encrypted copy of the data key
// in the CiphertextBlob field.
//
@@ -3666,8 +4044,8 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3697,10 +4075,18 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext
func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) {
req, out := c.GenerateDataKeyWithoutPlaintextRequest(input)
@@ -3767,15 +4153,18 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// GenerateMac API operation for AWS Key Management Service.
//
// Generates a hash-based message authentication code (HMAC) for a message using
-// an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm
-// computes the HMAC for the message and the key as described in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
-//
-// You can use the HMAC that this operation generates with the VerifyMac operation
-// to demonstrate that the original message has not changed. Also, because a
-// secret key is used to create the hash, you can verify that the party that
-// generated the hash has the required secret key. This operation is part of
-// KMS support for HMAC KMS keys. For details, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
-// in the Key Management Service Developer Guide .
+// an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys
+// and the HMAC algorithms that KMS uses conform to industry standards defined
+// in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
+//
+// You can use the value that GenerateMac returns in the VerifyMac operation to
+// demonstrate that the original message has not changed. Also, because a secret
+// key is used to create the hash, you can verify that the party that generated
+// the hash has the required secret key. You can also use the raw result to
+// implement HMAC-based algorithms such as key derivation functions. This operation
+// is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the
+// Key Management Service Developer Guide .
//
// Best practices recommend that you limit the time during which any signing
// mechanism, including an HMAC, is effective. This deters an attack where the
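A minimal sketch of the GenerateMac/VerifyMac round trip described above, assuming an HMAC KMS key behind a placeholder alias and the upstream SDK import path.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("alias/example-hmac-key") // placeholder HMAC KMS key
	msg := []byte("message to authenticate")

	// Compute the HMAC with an algorithm the key supports.
	mac, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        keyID,
		Message:      msg,
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Later, demonstrate the message has not changed by verifying the same HMAC.
	ok, err := svc.VerifyMac(&kms.VerifyMacInput{
		KeyId:        keyID,
		Message:      msg,
		Mac:          mac.Mac,
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MAC valid:", aws.BoolValue(ok.MacValid))
}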
@@ -3845,10 +4234,18 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMac
func (c *KMS) GenerateMac(input *GenerateMacInput) (*GenerateMacOutput, error) {
req, out := c.GenerateMacRequest(input)
@@ -3920,9 +4317,8 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
// byte string. There is no default value for string length.
//
// By default, the random byte string is generated in KMS. To generate the byte
-// string in the CloudHSM cluster that is associated with a custom key store
-// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// specify the custom key store ID.
+// string in the CloudHSM cluster associated with a CloudHSM key store, use
+// the CustomKeyStoreId parameter.
//
// Applications in Amazon Web Services Nitro Enclaves can call this operation
// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c).
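A short, hypothetical sketch of GenerateRandom as described above; omitting CustomKeyStoreId generates the bytes in KMS itself. The import path is the upstream one, not this vendored copy.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Ask KMS for 32 random bytes. Adding CustomKeyStoreId would instead
	// generate the bytes in the CloudHSM cluster behind a CloudHSM key store.
	out, err := svc.GenerateRandom(&kms.GenerateRandomInput{
		NumberOfBytes: aws.Int64(32),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("received %d random bytes\n", len(out.Plaintext))
}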
@@ -3949,13 +4345,17 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
//
+// - UnsupportedOperationException
+// The request was rejected because a specified parameter is not supported or
+// a specified resource is not valid for this operation.
+//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
// the specified key store name or ID.
@@ -3967,17 +4367,27 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+//   not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+//   - You requested the GenerateRandom operation in a CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom
func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) {
@@ -4072,8 +4482,8 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -4083,10 +4493,18 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy
func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) {
req, out := c.GetKeyPolicyRequest(input)
@@ -4163,12 +4581,12 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
//
// Automatic key rotation is supported only on symmetric encryption KMS keys
// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation)
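A minimal sketch of checking the rotation status of a symmetric encryption key, with a placeholder key ID and the upstream SDK import path.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Check whether automatic annual rotation is enabled for this key.
	out, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rotation enabled:", aws.BoolValue(out.KeyRotationEnabled))
}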
@@ -4228,8 +4646,8 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -4239,10 +4657,18 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -4322,11 +4748,11 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// a subsequent ImportKeyMaterial request.
//
// You must specify the key ID of the symmetric encryption KMS key into which
-// you will import key material. This KMS key's Origin must be EXTERNAL. You
-// must also specify the wrapping algorithm and type of wrapping key (public
-// key) that you will use to encrypt the key material. You cannot perform this
-// operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in
-// a different Amazon Web Services account.
+// you will import key material. The KMS key Origin must be EXTERNAL. You must
+// also specify the wrapping algorithm and type of wrapping key (public key)
+// that you will use to encrypt the key material. You cannot perform this operation
+// on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different
+// Amazon Web Services account.
//
// To import key material, you must use the public key and import token from
// the same response. These items are valid for 24 hours. The expiration date
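A hypothetical sketch of fetching the wrapping public key and import token described above; the key ID is a placeholder and the wrapping choices are just one supported combination.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Fetch the wrapping public key and import token for an EXTERNAL-origin key.
	// Both values must be used together and expire 24 hours after issuance.
	out, err := svc.GetParametersForImport(&kms.GetParametersForImportInput{
		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		WrappingAlgorithm: aws.String("RSAES_OAEP_SHA_256"),
		WrappingKeySpec:   aws.String("RSA_2048"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("public key: %d bytes, token: %d bytes, valid until %v\n",
		len(out.PublicKey), len(out.ImportToken), aws.TimeValue(out.ParametersValidTo))
}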
@@ -4368,8 +4794,8 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -4383,10 +4809,18 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport
func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) {
req, out := c.GetParametersForImportRequest(input)
@@ -4467,11 +4901,6 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// are part of every KMS operation. You also reduce the risk of encrypting data
// that cannot be decrypted. These features are not effective outside of KMS.
//
-// To verify a signature outside of KMS with an SM2 public key (China Regions
-// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
-// as the distinguishing ID. For more information, see Offline verification
-// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
-//
// To help you use the public key safely outside of KMS, GetPublicKey returns
// important information about the public key in the response, including:
//
@@ -4493,6 +4922,11 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// algorithm that is not supported by KMS. You can also avoid errors, such as
// using the wrong signing algorithm in a verification operation.
//
+// To verify a signature outside of KMS with an SM2 public key (China Regions
+// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
+// as the distinguishing ID. For more information, see Offline verification
+// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
+//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
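A minimal sketch of downloading a public key together with the metadata listed above; the alias is a placeholder and the import path assumes the upstream SDK.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Download the public half of an asymmetric KMS key, together with the
	// key spec, key usage, and supported algorithms mentioned above.
	out, err := svc.GetPublicKey(&kms.GetPublicKeyInput{
		KeyId: aws.String("alias/example-signing-key"), // placeholder asymmetric key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("spec=%s usage=%s, DER public key is %d bytes\n",
		aws.StringValue(out.KeySpec), aws.StringValue(out.KeyUsage), len(out.PublicKey))
}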
@@ -4527,8 +4961,8 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
@@ -4566,10 +5000,18 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey
func (c *KMS) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) {
req, out := c.GetPublicKeyRequest(input)
@@ -4664,11 +5106,13 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// - The import token that GetParametersForImport returned. You must use
// a public key and token from the same GetParametersForImport response.
//
-// - Whether the key material expires and if so, when. If you set an expiration
-// date, KMS deletes the key material from the KMS key on the specified date,
-// and the KMS key becomes unusable. To use the KMS key again, you must reimport
-// the same key material. The only way to change an expiration date is by
-// reimporting the same key material and specifying a new expiration date.
+// - Whether the key material expires (ExpirationModel) and, if so, when
+//   (ValidTo). If you set an expiration date, KMS deletes the key material
+//   from the KMS key on that date, making the KMS key unusable. To use
+// the KMS key in cryptographic operations again, you must reimport the same
+// key material. The only way to change the expiration model or expiration
+// date is by reimporting the same key material and specifying a new expiration
+// date.
//
// When this operation is successful, the key state of the KMS key changes from
// PendingImport to Enabled, and you can use the KMS key.
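A hypothetical sketch of the import step, assuming key material already wrapped with the public key from GetParametersForImport; all identifiers and byte slices are placeholders.

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// encryptedKeyMaterial is the key material wrapped with the public key from
	// GetParametersForImport; importToken comes from the same response.
	var encryptedKeyMaterial, importToken []byte // placeholders

	_, err := svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
		KeyId:                aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		EncryptedKeyMaterial: encryptedKeyMaterial,
		ImportToken:          importToken,
		// Expire the material in 90 days; use KEY_MATERIAL_DOES_NOT_EXPIRE and
		// omit ValidTo to import it without an expiration date.
		ExpirationModel: aws.String("KEY_MATERIAL_EXPIRES"),
		ValidTo:         aws.Time(time.Now().AddDate(0, 0, 90)),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key material imported; key state changes from PendingImport to Enabled")
}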
@@ -4714,8 +5158,8 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -4729,10 +5173,18 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InvalidCiphertextException
// From the Decrypt or ReEncrypt operation, the request was rejected because
// the specified ciphertext, or additional authenticated data incorporated into
@@ -4812,7 +5264,7 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request,
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
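The paginator settings above back the generated ListAliasesPages helper; here is a minimal sketch of walking every page, using the upstream import path and no handling beyond the final error check.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The Marker/NextMarker tokens and Limit declared in the paginator config
	// are handled for us; returning true requests the next page.
	err := svc.ListAliasesPages(&kms.ListAliasesInput{Limit: aws.Int64(50)},
		func(page *kms.ListAliasesOutput, lastPage bool) bool {
			for _, alias := range page.Aliases {
				fmt.Println(aws.StringValue(alias.AliasName))
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
}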
@@ -4873,8 +5325,8 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -4999,7 +5451,7 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
@@ -5061,8 +5513,8 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -5083,10 +5535,18 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants
func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) {
req, out := c.ListGrantsRequest(input)
@@ -5194,7 +5654,7 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
@@ -5243,8 +5703,8 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -5254,10 +5714,18 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies
func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) {
req, out := c.ListKeyPoliciesRequest(input)
@@ -5365,7 +5833,7 @@ func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, outpu
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
@@ -5409,8 +5877,8 @@ func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, outpu
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -5527,7 +5995,7 @@ func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *reques
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
@@ -5697,7 +6165,7 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *
InputTokens: []string{"Marker"},
OutputTokens: []string{"NextMarker"},
LimitToken: "Limit",
- TruncationToken: "Truncated",
+ TruncationToken: "",
},
}
@@ -5755,8 +6223,8 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -5931,8 +6399,8 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques
// or semantically correct.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
@@ -5951,10 +6419,18 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy
func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) {
req, out := c.PutKeyPolicyRequest(input)
@@ -6056,20 +6532,20 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// was encrypted under a different KMS key, the ReEncrypt operation fails.
// This practice ensures that you use the KMS key that you intend.
//
-// - To reencrypt the data, you must use the DestinationKeyId parameter specify
-// the KMS key that re-encrypts the data after it is decrypted. If the destination
-// KMS key is an asymmetric KMS key, you must also provide the encryption
-// algorithm. The algorithm that you choose must be compatible with the KMS
-// key. When you use an asymmetric KMS key to encrypt or reencrypt data,
-// be sure to record the KMS key and encryption algorithm that you choose.
-// You will be required to provide the same KMS key and encryption algorithm
-// when you decrypt the data. If the KMS key and algorithm do not match the
-// values used to encrypt the data, the decrypt operation fails. You are
-// not required to supply the key ID and encryption algorithm when you decrypt
-// with symmetric encryption KMS keys because KMS stores this information
-// in the ciphertext blob. KMS cannot store metadata in ciphertext generated
-// with asymmetric keys. The standard format for asymmetric key ciphertext
-// does not include configurable fields.
+// - To reencrypt the data, you must use the DestinationKeyId parameter to
+// specify the KMS key that re-encrypts the data after it is decrypted. If
+// the destination KMS key is an asymmetric KMS key, you must also provide
+// the encryption algorithm. The algorithm that you choose must be compatible
+// with the KMS key. When you use an asymmetric KMS key to encrypt or reencrypt
+// data, be sure to record the KMS key and encryption algorithm that you
+// choose. You will be required to provide the same KMS key and encryption
+// algorithm when you decrypt the data. If the KMS key and algorithm do not
+// match the values used to encrypt the data, the decrypt operation fails.
+// You are not required to supply the key ID and encryption algorithm when
+// you decrypt with symmetric encryption KMS keys because KMS stores this
+// information in the ciphertext blob. KMS cannot store metadata in ciphertext
+// generated with asymmetric keys. The standard format for asymmetric key
+// ciphertext does not include configurable fields.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
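A minimal sketch of re-encrypting existing ciphertext under a new key, as the bullets above describe; both key aliases and the ciphertext are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	var ciphertext []byte // ciphertext previously produced by KMS (placeholder)

	// Decrypt under the source key and re-encrypt under the destination key,
	// all inside KMS. SourceKeyId is optional for symmetric ciphertext but
	// recommended so you always use the KMS key you intend.
	out, err := svc.ReEncrypt(&kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		SourceKeyId:      aws.String("alias/old-key"), // placeholder
		DestinationKeyId: aws.String("alias/new-key"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("re-encrypted ciphertext: %d bytes\n", len(out.CiphertextBlob))
}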
@@ -6140,8 +6616,8 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// must identify the same KMS key that was used to encrypt the ciphertext.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -6171,10 +6647,18 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt
func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) {
req, out := c.ReEncryptRequest(input)
@@ -6348,10 +6832,18 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
@@ -6500,8 +6992,8 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -6511,10 +7003,18 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant
func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) {
req, out := c.RetireGrantRequest(input)
@@ -6628,8 +7128,8 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -6646,10 +7146,18 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant
func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) {
req, out := c.RevokeGrantRequest(input)
@@ -6730,13 +7238,6 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// is unrecoverable. (The only exception is a multi-Region replica key.) To
// prevent the use of a KMS key without deleting it, use DisableKey.
//
-// If you schedule deletion of a KMS key from a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// when the waiting period expires, ScheduleKeyDeletion deletes the KMS key
-// from KMS. Then KMS makes a best effort to delete the key material from the
-// associated CloudHSM cluster. However, you might need to manually delete the
-// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups.
-//
// You can schedule the deletion of a multi-Region primary key and its replica
// keys at any time. However, KMS will not delete a multi-Region primary key
// with existing replica keys. If you schedule the deletion of a primary key
@@ -6748,6 +7249,18 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
// in the Key Management Service Developer Guide.
//
+// When KMS deletes a KMS key from a CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html),
+// it makes a best effort to delete the associated key material from the associated
+// CloudHSM cluster. However, you might need to manually delete the orphaned
+// key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. Deleting a KMS key from an external key
+// store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html)
+// has no effect on the associated external key. However, for both types of
+// custom key stores, deleting a KMS key is destructive and irreversible. You
+// cannot decrypt ciphertext encrypted under the KMS key by using only its associated
+// external key or CloudHSM key. Also, you cannot recreate a KMS key in an external
+// key store by creating a new KMS key with the same key material.
+//
// For more information about scheduling a KMS key for deletion, see Deleting
// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
// in the Key Management Service Developer Guide.
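A hypothetical sketch of scheduling deletion with the shortest waiting period; the key ID is a placeholder and the import path assumes the upstream SDK.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Schedule deletion with the shortest allowed waiting period (7 days).
	// The key is unusable immediately and unrecoverable once deleted.
	out, err := svc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		PendingWindowInDays: aws.Int64(7),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key state %s, deletion on %v\n",
		aws.StringValue(out.KeyState), aws.TimeValue(out.DeletionDate))
}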
@@ -6785,8 +7298,8 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -6796,10 +7309,18 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) {
req, out := c.ScheduleKeyDeletionRequest(input)
@@ -6940,8 +7461,8 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -6971,10 +7492,18 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign
func (c *KMS) Sign(input *SignInput) (*SignOutput, error) {
req, out := c.SignRequest(input)
@@ -7044,7 +7573,7 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request,
// Adds or edits tags on a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
-// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// Each tag consists of a tag key and a tag value, both of which are case-sensitive
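A minimal sketch of adding one tag to a customer managed key; the key ID and tag values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Add (or overwrite) a case-sensitive tag key/value pair on a customer
	// managed key. Tags can feed ABAC policies, so treat them as permissions.
	_, err := svc.TagResource(&kms.TagResourceInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		Tags: []*kms.Tag{{
			TagKey:   aws.String("Project"),
			TagValue: aws.String("cluster-autoscaler"),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}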
@@ -7111,10 +7640,18 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - LimitExceededException
// The request was rejected because a quota was exceeded. For more information,
// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
@@ -7193,7 +7730,7 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ
// To delete a tag, specify the tag key and the KMS key.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
-// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// When it succeeds, the UntagResource operation doesn't return any output.
@@ -7251,10 +7788,18 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - TagException
// The request was rejected because one or more tags are not valid.
//
@@ -7330,14 +7875,14 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// account and Region.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// The current and new KMS key must be the same type (both symmetric or both
-// asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY).
-// This restriction prevents errors in code that uses aliases. If you must assign
-// an alias to a different type of KMS key, use DeleteAlias to delete the old
-// alias and CreateAlias to create a new alias.
+// asymmetric or both HMAC), and they must have the same key usage. This restriction
+// prevents errors in code that uses aliases. If you must assign an alias to
+// a different type of KMS key, use DeleteAlias to delete the old alias and
+// CreateAlias to create a new alias.
//
// You cannot use UpdateAlias to change an alias name. To change an alias name,
// use DeleteAlias to delete the old alias and CreateAlias to create a new alias.
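A minimal sketch of repointing an existing alias at a different KMS key of the same type and key usage; both identifiers are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Point an existing alias at a different KMS key of the same type and
	// key usage; the alias name itself cannot change.
	_, err := svc.UpdateAlias(&kms.UpdateAliasInput{
		AliasName:   aws.String("alias/example"),                       // placeholder existing alias
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder new target
	})
	if err != nil {
		log.Fatal(err)
	}
}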
@@ -7386,8 +7931,8 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -7406,10 +7951,18 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias
func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) {
req, out := c.UpdateAliasRequest(input)
@@ -7476,42 +8029,70 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
// UpdateCustomKeyStore API operation for AWS Key Management Service.
//
-// Changes the properties of a custom key store. Use the CustomKeyStoreId parameter
-// to identify the custom key store you want to edit. Use the remaining parameters
-// to change the properties of the custom key store.
-//
-// You can only update a custom key store that is disconnected. To disconnect
-// the custom key store, use DisconnectCustomKeyStore. To reconnect the custom
-// key store after the update completes, use ConnectCustomKeyStore. To find
-// the connection state of a custom key store, use the DescribeCustomKeyStores
-// operation.
-//
-// The CustomKeyStoreId parameter is required in all commands. Use the other
-// parameters of UpdateCustomKeyStore to edit your key store settings.
-//
-// - Use the NewCustomKeyStoreName parameter to change the friendly name
-// of the custom key store to the value that you specify.
+// Changes the properties of a custom key store. You can use this operation
+// to change the properties of a CloudHSM key store or an external key store.
//
-// - Use the KeyStorePassword parameter tell KMS the current password of
-// the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
-// in the associated CloudHSM cluster. You can use this parameter to fix
-// connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password)
-// that occur when KMS cannot log into the associated cluster because the
-// kmsuser password has changed. This value does not change the password
-// in the CloudHSM cluster.
+// Use the required CustomKeyStoreId parameter to identify the custom key store.
+// Use the remaining optional parameters to change its properties. This operation
+// does not return any property values. To verify the updated property values,
+// use the DescribeCustomKeyStores operation.
//
-// - Use the CloudHsmClusterId parameter to associate the custom key store
-// with a different, but related, CloudHSM cluster. You can use this parameter
-// to repair a custom key store if its CloudHSM cluster becomes corrupted
-// or is deleted, or when you need to create or restore a cluster from a
-// backup.
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// When updating the properties of an external key store, verify that the updated
+// settings connect your key store, via the external key store proxy, to the
+// same external key manager as the previous settings, or to a backup or snapshot
+// of the external key manager with the same cryptographic keys. If the updated
+// connection settings fail, you can fix them and retry, although an extended
+// delay might disrupt Amazon Web Services services. However, if KMS permanently
+// loses its access to cryptographic keys, ciphertext encrypted under those
+// keys is unrecoverable.
+//
+// For external key stores:
+//
+// Some external key managers provide a simpler method for updating an external
+// key store. For details, see your external key manager documentation.
+//
+// When updating an external key store in the KMS console, you can upload a
+// JSON-based proxy configuration file with the desired values. You cannot upload
+// the proxy configuration file to the UpdateCustomKeyStore operation. However,
+// you can use the file to help you determine the correct values for the UpdateCustomKeyStore
+// parameters.
+//
+// For a CloudHSM key store, you can use this operation to change the custom
+// key store friendly name (NewCustomKeyStoreName), to tell KMS about a change
+// to the kmsuser crypto user password (KeyStorePassword), or to associate the
+// custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId).
+// To update any property of a CloudHSM key store, the ConnectionState of the
+// CloudHSM key store must be DISCONNECTED.
+//
+// For an external key store, you can use this operation to change the custom
+// key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change
+// to the external key store proxy authentication credentials (XksProxyAuthenticationCredential),
+// connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint)
+// and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity
+// of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service
+// name (XksProxyVpcEndpointServiceName). To update most properties of an external
+// key store, the ConnectionState of the external key store must be DISCONNECTED.
+// However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential,
+// and XksProxyUriPath of an external key store when it is in the CONNECTED
+// or DISCONNECTED state.
+//
+// If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore,
+// use the DisconnectCustomKeyStore operation to disconnect the custom key store.
+// After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore
+// to reconnect the custom key store. To find the ConnectionState of the custom
+// key store, use the DescribeCustomKeyStores operation.
+//
+// Before updating the custom key store, verify that the new values allow KMS
+// to connect the custom key store to its backing key store. For example, before
+// you change the XksProxyUriPath value, verify that the external key store
+// proxy is reachable at the new path.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
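A hypothetical sketch of one UpdateCustomKeyStore use, renaming a disconnected key store; the store ID and new name are placeholders, and the other parameters discussed above are omitted.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Rename a custom key store. For a CloudHSM key store the store must be
	// DISCONNECTED first (DisconnectCustomKeyStore), then reconnected afterwards.
	_, err := svc.UpdateCustomKeyStore(&kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId:      aws.String("cks-1234567890abcdef0"), // placeholder store ID
		NewCustomKeyStoreName: aws.String("ExampleKeyStoreRenamed"),
	})
	if err != nil {
		log.Fatal(err)
	}
}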
@@ -7555,15 +8136,16 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
// - CloudHsmClusterNotRelatedException
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
-// to specify an unrelated cluster.
+// to specify an unrelated cluster for an CloudHSM key store.
//
-// Specify a cluster that shares a backup history with the original cluster.
-// This includes clusters that were created from a backup of the current cluster,
-// and clusters that were created from the same backup that produced the current
-// cluster.
+// Specify an CloudHSM cluster that shares a backup history with the original
+// cluster. This includes clusters that were created from a backup of the current
+// cluster, and clusters that were created from the same backup that produced
+// the current cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
//
// - CustomKeyStoreInvalidStateException
@@ -7573,60 +8155,126 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM cluster must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - XksProxyUriInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with an external key store in the
+// Amazon Web Services account and Region. Each external key store in an account
+// and Region must use a unique external key store proxy API address.
+//
+// - XksProxyUriEndpointInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// is already associated with an external key store in the Amazon Web Services
+// account and Region. Each external key store in an account and Region must
+// use a unique external key store proxy address.
+//
+// - XksProxyUriUnreachableException
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+//
+// - XksProxyIncorrectAuthenticationCredentialException
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+//
+// - XksProxyVpcEndpointServiceInUseException
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with an external key store in the Amazon Web Services
+// account and Region. Each external key store in an Amazon Web Services account
+// and Region must use a different Amazon VPC endpoint service.
+//
+// - XksProxyVpcEndpointServiceNotFoundException
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+//
+// - XksProxyVpcEndpointServiceInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+//
+// - XksProxyInvalidResponseException
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+//
+// - XksProxyInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore
func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) {
req, out := c.UpdateCustomKeyStoreRequest(input)
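// Illustrative sketch (not part of the generated SDK): the disconnect, update,
// reconnect flow described above for UpdateCustomKeyStore properties that require
// a DISCONNECTED ConnectionState. The key store ID and new name are hypothetical
// placeholders; imports assumed are github.com/aws/aws-sdk-go/aws and
// github.com/aws/aws-sdk-go/service/kms.
func exampleRenameCustomKeyStore(svc *kms.KMS, keyStoreID, newName string) error {
	// Most UpdateCustomKeyStore changes require the key store to be DISCONNECTED.
	if _, err := svc.DisconnectCustomKeyStore(&kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(keyStoreID),
	}); err != nil {
		return err
	}
	if _, err := svc.UpdateCustomKeyStore(&kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId:      aws.String(keyStoreID),
		NewCustomKeyStoreName: aws.String(newName),
	}); err != nil {
		return err
	}
	// Reconnect once the update has completed.
	_, err := svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(keyStoreID),
	})
	return err
}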
@@ -7730,8 +8378,8 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -7741,10 +8389,18 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription
func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) {
req, out := c.UpdateKeyDescriptionRequest(input)
@@ -7902,10 +8558,18 @@ func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
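// Illustrative sketch (not part of the generated SDK): reacting to the
// KMSInvalidStateException described above by looking up the key state with
// DescribeKey, as the documentation suggests. The key ID is a hypothetical
// placeholder; imports assumed are github.com/aws/aws-sdk-go/aws,
// github.com/aws/aws-sdk-go/aws/awserr, github.com/aws/aws-sdk-go/service/kms,
// and the standard library fmt.
func exampleCheckKeyState(svc *kms.KMS, keyID string, opErr error) {
	if aerr, ok := opErr.(awserr.Error); ok && aerr.Code() == kms.ErrCodeKMSInvalidStateException {
		// The operation failed because of the key's state (or, for keys in
		// custom key stores, a more general failure). Inspect the current state.
		out, err := svc.DescribeKey(&kms.DescribeKeyInput{KeyId: aws.String(keyID)})
		if err != nil {
			fmt.Println("DescribeKey failed:", err)
			return
		}
		fmt.Println("key state:", aws.StringValue(out.KeyMetadata.KeyState))
	}
}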
@@ -7997,22 +8661,25 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Key Management Service Developer Guide.
//
-// To verify a digital signature, you can use the Verify operation. Specify
-// the same asymmetric KMS key, message, and signing algorithm that were used
-// to produce the signature.
+// To use the Verify operation, specify the same asymmetric KMS key, message,
+// and signing algorithm that were used to produce the signature. The message
+// type does not need to be the same as the one used for signing, but it must
+// indicate whether the value of the Message parameter should be hashed as part
+// of the verification process.
//
// You can also verify the digital signature by using the public key of the
// KMS key outside of KMS. Use the GetPublicKey operation to download the public
// key in the asymmetric KMS key and then use the public key to verify the signature
-// outside of KMS. To verify a signature outside of KMS with an SM2 public key,
-// you must specify the distinguishing ID. By default, KMS uses 1234567812345678
+// outside of KMS. The advantage of using the Verify operation is that it is
+// performed within KMS. As a result, it's easy to call, the operation is performed
+// within the FIPS boundary, it is logged in CloudTrail, and you can use key
+// policy and IAM policy to determine who is authorized to use the KMS key to
+// verify signatures.
+//
+// To verify a signature outside of KMS with an SM2 public key (China Regions
+// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
// as the distinguishing ID. For more information, see Offline verification
-// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification)
-// in Key Management Service Developer Guide. The advantage of using the Verify
-// operation is that it is performed within KMS. As a result, it's easy to call,
-// the operation is performed within the FIPS boundary, it is logged in CloudTrail,
-// and you can use key policy and IAM policy to determine who is authorized
-// to use the KMS key to verify signatures.
+// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
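// Illustrative sketch (not part of the generated SDK): producing a signature
// with Sign and checking it with Verify using the same KMS key, message, and
// signing algorithm, as described above. The key and algorithm are hypothetical
// placeholders for an asymmetric signing key; imports assumed are
// github.com/aws/aws-sdk-go/aws and github.com/aws/aws-sdk-go/service/kms.
func exampleSignAndVerify(svc *kms.KMS, keyID string, message []byte) (bool, error) {
	signOut, err := svc.Sign(&kms.SignInput{
		KeyId:            aws.String(keyID),
		Message:          message,
		MessageType:      aws.String("RAW"), // KMS hashes the raw message before signing
		SigningAlgorithm: aws.String("ECDSA_SHA_256"),
	})
	if err != nil {
		return false, err
	}
	verifyOut, err := svc.Verify(&kms.VerifyInput{
		KeyId:            aws.String(keyID),
		Message:          message,
		MessageType:      aws.String("RAW"),
		Signature:        signOut.Signature,
		SigningAlgorithm: aws.String("ECDSA_SHA_256"),
	})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(verifyOut.SignatureValid), nil
}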
@@ -8048,8 +8715,8 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -8079,10 +8746,18 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - KMSInvalidSignatureException
// The request was rejected because the signature verification failed. Signature
// verification fails when it cannot confirm that signature was produced by
@@ -8157,10 +8832,12 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes
// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify,
// and compares the computed HMAC to the HMAC that you specify. If the HMACs
-// are identical, the verification succeeds; otherwise, it fails.
+// are identical, the verification succeeds; otherwise, it fails. Verification
+// indicates that the message hasn't changed since the HMAC was calculated,
+// and the specified key was used to generate and verify the HMAC.
//
-// Verification indicates that the message hasn't changed since the HMAC was
-// calculated, and the specified key was used to generate and verify the HMAC.
+// HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards
+// defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
//
// This operation is part of KMS support for HMAC KMS keys. For details, see
// HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
@@ -8232,10 +8909,18 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMac
func (c *KMS) VerifyMac(input *VerifyMacInput) (*VerifyMacOutput, error) {
req, out := c.VerifyMacRequest(input)
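// Illustrative sketch (not part of the generated SDK): computing an HMAC with
// GenerateMac and checking it with VerifyMac using the same message, HMAC KMS
// key, and MAC algorithm, as described above. The key ID is a hypothetical
// placeholder for an HMAC key; imports assumed are github.com/aws/aws-sdk-go/aws
// and github.com/aws/aws-sdk-go/service/kms.
func exampleGenerateAndVerifyMac(svc *kms.KMS, keyID string, message []byte) (bool, error) {
	genOut, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        aws.String(keyID),
		Message:      message,
		MacAlgorithm: aws.String("HMAC_SHA_256"),
	})
	if err != nil {
		return false, err
	}
	// VerifyMac recomputes the HMAC and compares it; a mismatch is returned as an error.
	verifyOut, err := svc.VerifyMac(&kms.VerifyMacInput{
		KeyId:        aws.String(keyID),
		Message:      message,
		Mac:          genOut.Mac,
		MacAlgorithm: aws.String("HMAC_SHA_256"),
	})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(verifyOut.MacValid), nil
}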
@@ -8486,12 +9171,13 @@ func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput {
}
// The request was rejected because the specified CloudHSM cluster is already
-// associated with a custom key store or it shares a backup history with a cluster
-// that is associated with a custom key store. Each custom key store must be
-// associated with a different CloudHSM cluster.
+// associated with an CloudHSM key store in the account, or it shares a backup
+// history with an CloudHSM key store in the account. Each CloudHSM key store
+// in the account must be associated with a different CloudHSM cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
type CloudHsmClusterInUseException struct {
_ struct{} `type:"structure"`
@@ -8557,29 +9243,29 @@ func (s *CloudHsmClusterInUseException) RequestID() string {
}
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM cluster must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -8648,10 +9334,9 @@ func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string {
return s.RespMetadata.RequestID
}
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
type CloudHsmClusterNotActiveException struct {
_ struct{} `type:"structure"`
@@ -8783,15 +9468,16 @@ func (s *CloudHsmClusterNotFoundException) RequestID() string {
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
-// to specify an unrelated cluster.
+// to specify an unrelated cluster for an CloudHSM key store.
//
-// Specify a cluster that shares a backup history with the original cluster.
-// This includes clusters that were created from a backup of the current cluster,
-// and clusters that were created from the same backup that produced the current
-// cluster.
+// Specify an CloudHSM cluster that shares a backup history with the original
+// cluster. This includes clusters that were created from a backup of the current
+// cluster, and clusters that were created from the same backup that produced
+// the current cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
type CloudHsmClusterNotRelatedException struct {
_ struct{} `type:"structure"`
@@ -9042,18 +9728,33 @@ func (s CreateAliasOutput) GoString() string {
type CreateCustomKeyStoreInput struct {
_ struct{} `type:"structure"`
- // Identifies the CloudHSM cluster for the custom key store. Enter the cluster
- // ID of any active CloudHSM cluster that is not already associated with a custom
- // key store. To find the cluster ID, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // Identifies the CloudHSM cluster for an CloudHSM key store. This parameter
+ // is required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM.
+ //
+ // Enter the cluster ID of any active CloudHSM cluster that is not already associated
+ // with a custom key store. To find the cluster ID, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
CloudHsmClusterId *string `min:"19" type:"string"`
// Specifies a friendly name for the custom key store. The name must be unique
- // in your Amazon Web Services account.
+ // in your Amazon Web Services account and Region. This parameter is required
+ // for all custom key stores.
//
// CustomKeyStoreName is a required field
CustomKeyStoreName *string `min:"1" type:"string" required:"true"`
+ // Specifies the type of custom key store. The default value is AWS_CLOUDHSM.
+ //
+ // For a custom key store backed by an CloudHSM cluster, omit the parameter
+ // or enter AWS_CLOUDHSM. For a custom key store backed by an external key manager
+ // outside of Amazon Web Services, enter EXTERNAL_KEY_STORE. You cannot change
+ // this property after the key store is created.
+ CustomKeyStoreType *string `type:"string" enum:"CustomKeyStoreType"`
+
+ // Specifies the kmsuser password for an CloudHSM key store. This parameter
+ // is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
+ //
// Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
// in the specified CloudHSM cluster. KMS logs into the cluster as this user
// to manage key material on your behalf.
@@ -9068,10 +9769,118 @@ type CreateCustomKeyStoreInput struct {
// String and GoString methods.
KeyStorePassword *string `min:"7" type:"string" sensitive:"true"`
- // Enter the content of the trust anchor certificate for the cluster. This is
- // the content of the customerCA.crt file that you created when you initialized
- // the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html).
+ // Specifies the certificate for an CloudHSM key store. This parameter is required
+ // for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
+ //
+ // Enter the content of the trust anchor certificate for the CloudHSM cluster.
+ // This is the content of the customerCA.crt file that you created when you
+ // initialized the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html).
TrustAnchorCertificate *string `min:"1" type:"string"`
+
+ // Specifies an authentication credential for the external key store proxy (XKS
+ // proxy). This parameter is required for all custom key stores with a CustomKeyStoreType
+ // of EXTERNAL_KEY_STORE.
+ //
+ // The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey,
+ // a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey.
+ // For character requirements, see XksProxyAuthenticationCredentialType (kms/latest/APIReference/API_XksProxyAuthenticationCredentialType.html).
+ //
+ // KMS uses this authentication credential to sign requests to the external
+ // key store proxy on your behalf. This credential is unrelated to Identity
+ // and Access Management (IAM) and Amazon Web Services credentials.
+ //
+ // This parameter doesn't set or change the authentication credentials on the
+ // XKS proxy. It just tells KMS the credential that you established on your
+ // external key store proxy. If you rotate your proxy authentication credential,
+ // use the UpdateCustomKeyStore operation to provide the new credential to KMS.
+ XksProxyAuthenticationCredential *XksProxyAuthenticationCredentialType `type:"structure"`
+
+ // Indicates how KMS communicates with the external key store proxy. This parameter
+ // is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT.
+ // If the external key store proxy uses an Amazon VPC endpoint service for communication
+ // with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see
+ // Choosing a connectivity option (https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity)
+ // in the Key Management Service Developer Guide.
+ //
+ // An Amazon VPC endpoint service keeps your communication with KMS in a private
+ // address space entirely within Amazon Web Services, but it requires more configuration,
+ // including establishing an Amazon VPC with multiple subnets, a VPC endpoint
+ // service, a network load balancer, and a verified private DNS name. A public
+ // endpoint is simpler to set up, but it might be slower and might not fulfill
+ // your security requirements. You might consider testing with a public endpoint,
+ // and then establishing a VPC endpoint service for production tasks. Note that
+ // this choice does not determine the location of the external key store proxy.
+ // Even if you choose a VPC endpoint service, the proxy can be hosted within
+ // the VPC or outside of Amazon Web Services such as in your corporate data
+ // center.
+ XksProxyConnectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // Specifies the endpoint that KMS uses to send requests to the external key
+ // store proxy (XKS proxy). This parameter is required for custom key stores
+ // with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // The protocol must be HTTPS. KMS communicates on port 443. Do not specify
+ // the port in the XksProxyUriEndpoint value.
+ //
+ // For external key stores with XksProxyConnectivity value of VPC_ENDPOINT_SERVICE,
+ // specify https:// followed by the private DNS name of the VPC endpoint service.
+ //
+ // For external key stores with PUBLIC_ENDPOINT connectivity, this endpoint
+ // must be reachable before you create the custom key store. KMS connects to
+ // the external key store proxy while creating the custom key store. For external
+ // key stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects when you
+ // call the ConnectCustomKeyStore operation.
+ //
+ // The value of this parameter must begin with https://. The remainder can contain
+ // upper and lower case letters (A-Z and a-z), numbers (0-9), dots (.), and
+ // hyphens (-). Additional slashes (/ and \) are not permitted.
+ //
+ // Uniqueness requirements:
+ //
+ // * The combined XksProxyUriEndpoint and XksProxyUriPath values must be
+ // unique in the Amazon Web Services account and Region.
+ //
+ // * An external key store with PUBLIC_ENDPOINT connectivity cannot use the
+ // same XksProxyUriEndpoint value as an external key store with VPC_ENDPOINT_SERVICE
+ // connectivity in the same Amazon Web Services Region.
+ //
+ // * Each external key store with VPC_ENDPOINT_SERVICE connectivity must
+ // have its own private DNS name. The XksProxyUriEndpoint value for external
+ // key stores with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must
+ // be unique in the Amazon Web Services account and Region.
+ XksProxyUriEndpoint *string `min:"10" type:"string"`
+
+ // Specifies the base path to the proxy APIs for this external key store. To
+ // find this value, see the documentation for your external key store proxy.
+ // This parameter is required for all custom key stores with a CustomKeyStoreType
+ // of EXTERNAL_KEY_STORE.
+ //
+ // The value must start with / and must end with /kms/xks/v1 where v1 represents
+ // the version of the KMS external key store proxy API. This path can include
+ // an optional prefix between the required elements such as /prefix/kms/xks/v1.
+ //
+ // Uniqueness requirements:
+ //
+ // * The combined XksProxyUriEndpoint and XksProxyUriPath values must be
+ // unique in the Amazon Web Services account and Region.
+ XksProxyUriPath *string `min:"10" type:"string"`
+
+ // Specifies the name of the Amazon VPC endpoint service for interface endpoints
+ // that is used to communicate with your external key store proxy (XKS proxy).
+ // This parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE
+ // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE.
+ //
+ // The Amazon VPC endpoint service must fulfill all requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements)
+ // for use with an external key store.
+ //
+ // Uniqueness requirements:
+ //
+ // * External key stores with VPC_ENDPOINT_SERVICE connectivity can share
+ // an Amazon VPC, but each external key store must have its own VPC endpoint
+ // service and private DNS name.
+ XksProxyVpcEndpointServiceName *string `min:"20" type:"string"`
}
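// Illustrative sketch (not part of the generated SDK): populating
// CreateCustomKeyStoreInput for an external key store with PUBLIC_ENDPOINT
// connectivity, per the field documentation above. All values shown (name,
// endpoint, path, credential) are hypothetical placeholders; imports assumed
// are github.com/aws/aws-sdk-go/aws and github.com/aws/aws-sdk-go/service/kms.
func exampleCreateExternalKeyStore(svc *kms.KMS) (*kms.CreateCustomKeyStoreOutput, error) {
	return svc.CreateCustomKeyStore(&kms.CreateCustomKeyStoreInput{
		CustomKeyStoreName:   aws.String("ExampleExternalKeyStore"),
		CustomKeyStoreType:   aws.String("EXTERNAL_KEY_STORE"),
		XksProxyConnectivity: aws.String("PUBLIC_ENDPOINT"),
		// The endpoint must start with https://; the path must end with /kms/xks/v1.
		XksProxyUriEndpoint: aws.String("https://xks.example.com"),
		XksProxyUriPath:     aws.String("/example-prefix/kms/xks/v1"),
		XksProxyAuthenticationCredential: &kms.XksProxyAuthenticationCredentialType{
			AccessKeyId:        aws.String("ABCDE12345670EXAMPLE"),
			RawSecretAccessKey: aws.String("DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo="),
		},
	})
}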
// String returns the string representation.
@@ -9110,11 +9919,25 @@ func (s *CreateCustomKeyStoreInput) Validate() error {
if s.TrustAnchorCertificate != nil && len(*s.TrustAnchorCertificate) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TrustAnchorCertificate", 1))
}
-
- if invalidParams.Len() > 0 {
- return invalidParams
+ if s.XksProxyUriEndpoint != nil && len(*s.XksProxyUriEndpoint) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriEndpoint", 10))
}
- return nil
+ if s.XksProxyUriPath != nil && len(*s.XksProxyUriPath) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriPath", 10))
+ }
+ if s.XksProxyVpcEndpointServiceName != nil && len(*s.XksProxyVpcEndpointServiceName) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyVpcEndpointServiceName", 20))
+ }
+ if s.XksProxyAuthenticationCredential != nil {
+ if err := s.XksProxyAuthenticationCredential.Validate(); err != nil {
+ invalidParams.AddNested("XksProxyAuthenticationCredential", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
}
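// Illustrative sketch (not part of the generated SDK): the Validate method
// above rejects Xks* values shorter than their documented minimums before any
// request is sent. The input below is hypothetical; fmt is assumed imported.
func exampleValidateXksInput() {
	in := &kms.CreateCustomKeyStoreInput{
		CustomKeyStoreName: aws.String("ExampleExternalKeyStore"),
		XksProxyUriPath:    aws.String("/short"), // fewer than 10 characters
	}
	if err := in.Validate(); err != nil {
		// Prints an invalid-parameter error naming XksProxyUriPath.
		fmt.Println("client-side validation failed:", err)
	}
}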
// SetCloudHsmClusterId sets the CloudHsmClusterId field's value.
@@ -9129,6 +9952,12 @@ func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreName(v string) *CreateCusto
return s
}
+// SetCustomKeyStoreType sets the CustomKeyStoreType field's value.
+func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreType(v string) *CreateCustomKeyStoreInput {
+ s.CustomKeyStoreType = &v
+ return s
+}
+
// SetKeyStorePassword sets the KeyStorePassword field's value.
func (s *CreateCustomKeyStoreInput) SetKeyStorePassword(v string) *CreateCustomKeyStoreInput {
s.KeyStorePassword = &v
@@ -9141,6 +9970,36 @@ func (s *CreateCustomKeyStoreInput) SetTrustAnchorCertificate(v string) *CreateC
return s
}
+// SetXksProxyAuthenticationCredential sets the XksProxyAuthenticationCredential field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyAuthenticationCredential(v *XksProxyAuthenticationCredentialType) *CreateCustomKeyStoreInput {
+ s.XksProxyAuthenticationCredential = v
+ return s
+}
+
+// SetXksProxyConnectivity sets the XksProxyConnectivity field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyConnectivity(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyConnectivity = &v
+ return s
+}
+
+// SetXksProxyUriEndpoint sets the XksProxyUriEndpoint field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyUriEndpoint(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyUriEndpoint = &v
+ return s
+}
+
+// SetXksProxyUriPath sets the XksProxyUriPath field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyUriPath(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyUriPath = &v
+ return s
+}
+
+// SetXksProxyVpcEndpointServiceName sets the XksProxyVpcEndpointServiceName field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyVpcEndpointServiceName(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyVpcEndpointServiceName = &v
+ return s
+}
+
type CreateCustomKeyStoreOutput struct {
_ struct{} `type:"structure"`
@@ -9216,13 +10075,11 @@ type CreateGrantInput struct {
// The identity that gets the permissions specified in the grant.
//
- // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, IAM roles, federated
- // users, and assumed role users. For examples of the ARN syntax to use for
- // specifying a principal, see Amazon Web Services Identity and Access Management
- // (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // To specify the grantee principal, use the Amazon Resource Name (ARN) of an
+ // Amazon Web Services principal. Valid principals include Amazon Web Services
+ // accounts, IAM users, IAM roles, federated users, and assumed role users.
+ // For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// GranteePrincipal is a required field
GranteePrincipal *string `min:"1" type:"string" required:"true"`
@@ -9275,12 +10132,10 @@ type CreateGrantInput struct {
// the grant.
//
// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, federated users,
- // and assumed role users. For examples of the ARN syntax to use for specifying
- // a principal, see Amazon Web Services Identity and Access Management (IAM)
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // of an Amazon Web Services principal. Valid principals include Amazon Web
+ // Services accounts, IAM users, IAM roles, federated users, and assumed role
+ // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// The grant determines the retiring principal. Other principals might have
// permission to retire the grant or revoke the grant. For details, see RevokeGrant
@@ -9431,46 +10286,39 @@ func (s *CreateGrantOutput) SetGrantToken(v string) *CreateGrantOutput {
type CreateKeyInput struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
//
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide .
- //
- // Use this parameter only when you include a policy in the request and you
- // intend to prevent the principal that is making the request from making a
- // subsequent PutKeyPolicy request on the KMS key.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
- // The default value is false.
+ // Use this parameter only when you intend to prevent the principal that is
+ // making the request from making a subsequent PutKeyPolicy request on the KMS
+ // key.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
- // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // and the key material in its associated CloudHSM cluster. To create a KMS
- // key in a custom key store, you must also specify the Origin parameter with
- // a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the
- // custom key store must have at least two active HSMs, each in a different
- // Availability Zone in the Region.
+ // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+ // The ConnectionState of the custom key store must be CONNECTED. To find the
+ // CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.
//
// This parameter is valid only for symmetric encryption KMS keys in a single
// Region. You cannot create any other type of KMS key in a custom key store.
//
- // To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
- //
- // The response includes the custom key store ID and the ID of the CloudHSM
- // cluster.
- //
- // This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // feature in KMS, which combines the convenience and extensive integration
- // of KMS with the isolation and control of a single-tenant key store.
+ // When you create a KMS key in an CloudHSM key store, KMS generates a non-exportable
+ // 256-bit symmetric key in its associated CloudHSM cluster and associates it
+ // with the KMS key. When you create a KMS key in an external key store, you
+ // must use the XksKeyId parameter to specify an external key that serves as
+ // key material for the KMS key.
CustomKeyStoreId *string `min:"1" type:"string"`
// Instead, use the KeySpec parameter.
//
// The KeySpec and CustomerMasterKeySpec parameters work the same way. Only
// the names differ. We recommend that you use the KeySpec parameter in your code.
- // However, to avoid breaking changes, KMS will support both parameters.
+ // However, to avoid breaking changes, KMS supports both parameters.
//
// Deprecated: This parameter has been deprecated. Instead, use the KeySpec parameter.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
@@ -9491,11 +10339,11 @@ type CreateKeyInput struct {
// in the Key Management Service Developer Guide .
//
// The KeySpec determines whether the KMS key contains a symmetric key or an
- // asymmetric key pair. It also determines the cryptographic algorithms that
- // the KMS key supports. You can't change the KeySpec after the KMS key is created.
- // To further restrict the algorithms that can be used with the KMS key, use
- // a condition key in its key policy or IAM policy. For more information, see
- // kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
+ // asymmetric key pair. It also determines the algorithms that the KMS key supports.
+ // You can't change the KeySpec after the KMS key is created. To further restrict
+ // the algorithms that can be used with the KMS key, use a condition key in
+ // its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
// kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)
// or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
// in the Key Management Service Developer Guide .
@@ -9561,65 +10409,59 @@ type CreateKeyInput struct {
// This value creates a primary key, not a replica. To create a replica key,
// use the ReplicateKey operation.
//
- // You can create a multi-Region version of a symmetric encryption KMS key,
- // an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material.
- // However, you cannot create a multi-Region key in a custom key store.
+ // You can create a symmetric or asymmetric multi-Region key, and you can create
+ // a multi-Region key with imported key material. However, you cannot create
+ // a multi-Region key in a custom key store.
MultiRegion *bool `type:"boolean"`
// The source of the key material for the KMS key. You cannot change the origin
// after you create the KMS key. The default is AWS_KMS, which means that KMS
// creates the key material.
//
- // To create a KMS key with no key material (for imported key material), set
- // the value to EXTERNAL. For more information about importing key material
- // into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
- // in the Key Management Service Developer Guide. This value is valid only for
- // symmetric encryption KMS keys.
+ // To create a KMS key with no key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html)
+ // (for imported key material), set this value to EXTERNAL. For more information
+ // about importing key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+ // in the Key Management Service Developer Guide. The EXTERNAL origin value
+ // is valid only for symmetric KMS keys.
//
- // To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+ // To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html)
// and create its key material in the associated CloudHSM cluster, set this
// value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to
- // identify the custom key store. This value is valid only for symmetric encryption
- // KMS keys.
+ // identify the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT.
+ //
+ // To create a KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html),
+ // set this value to EXTERNAL_KEY_STORE. You must also use the CustomKeyStoreId
+ // parameter to identify the external key store and the XksKeyId parameter to
+ // identify the associated external key. The KeySpec value must be SYMMETRIC_DEFAULT.
Origin *string `type:"string" enum:"OriginType"`
- // The key policy to attach to the KMS key. If you do not specify a key policy,
- // KMS attaches a default key policy to the KMS key. For more information, see
- // Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
- // in the Key Management Service Developer Guide.
+ // The key policy to attach to the KMS key.
//
// If you provide a key policy, it must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy
- // must allow the principal that is making the CreateKey request to make
- // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk
- // that the KMS key becomes unmanageable. For more information, refer to
- // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide .
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
// in the Amazon Web Services Identity and Access Management User Guide.
//
- // A key policy document can include only the following characters:
- //
- // * Printable ASCII characters from the space character (\u0020) through
- // the end of the ASCII character range.
- //
- // * Printable characters in the Basic Latin and Latin-1 Supplement character
- // set (through \u00FF).
+ // If you do not provide a key policy, KMS attaches a default key policy to
+ // the KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
+ // in the Key Management Service Developer Guide.
//
- // * The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special
- // characters
+ // The key policy size quota is 32 kilobytes (32768 bytes).
//
- // For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
- // in the Key Management Service Developer Guide. For help writing and formatting
- // a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
+ // For help writing and formatting a JSON policy document, see the IAM JSON
+ // Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
// in the Identity and Access Management User Guide .
Policy *string `min:"1" type:"string"`
@@ -9627,7 +10469,7 @@ type CreateKeyInput struct {
// key when it is created. To tag an existing KMS key, use the TagResource operation.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
@@ -9644,6 +10486,33 @@ type CreateKeyInput struct {
// Tags can also be used to control access to a KMS key. For details, see Tagging
// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html).
Tags []*Tag `type:"list"`
+
+ // Identifies the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+ // that serves as key material for the KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html).
+ // Specify the ID that the external key store proxy (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy)
+ // uses to refer to the external key. For help, see the documentation for your
+ // external key store proxy.
+ //
+ // This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE.
+ // It is not valid for KMS keys with any other Origin value.
+ //
+ // The external key must be an existing 256-bit AES symmetric encryption key
+ // hosted outside of Amazon Web Services in an external key manager associated
+ // with the external key store specified by the CustomKeyStoreId parameter.
+ // This key must be enabled and configured to perform encryption and decryption.
+ // Each KMS key in an external key store must use a different external key.
+ // For details, see Requirements for a KMS key in an external key store (https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements)
+ // in the Key Management Service Developer Guide.
+ //
+ // Each KMS key in an external key store is associated with two backing keys. One
+ // is key material that KMS generates. The other is the external key specified
+ // by this parameter. When you use the KMS key in an external key store to encrypt
+ // data, the encryption operation is performed first by KMS using the KMS key
+ // material, and then by the external key manager using the specified external
+ // key, a process known as double encryption. For details, see Double encryption
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption)
+ // in the Key Management Service Developer Guide.
+ XksKeyId *string `min:"1" type:"string"`
}
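// Illustrative sketch (not part of the generated SDK): creating a KMS key in
// an external key store using the XksKeyId parameter described above. The
// custom key store ID and external key ID are hypothetical placeholders;
// imports assumed are github.com/aws/aws-sdk-go/aws and
// github.com/aws/aws-sdk-go/service/kms.
func exampleCreateXksKey(svc *kms.KMS) (*kms.CreateKeyOutput, error) {
	return svc.CreateKey(&kms.CreateKeyInput{
		// The external key store must already be CONNECTED.
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"),
		Origin:           aws.String("EXTERNAL_KEY_STORE"),
		// ID of the 256-bit AES key as known to the external key store proxy.
		XksKeyId:    aws.String("bb8562717f809024"),
		KeySpec:     aws.String("SYMMETRIC_DEFAULT"),
		KeyUsage:    aws.String("ENCRYPT_DECRYPT"),
		Description: aws.String("example key backed by an external key"),
	})
}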
// String returns the string representation.
@@ -9673,6 +10542,9 @@ func (s *CreateKeyInput) Validate() error {
if s.Policy != nil && len(*s.Policy) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
}
+ if s.XksKeyId != nil && len(*s.XksKeyId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("XksKeyId", 1))
+ }
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@@ -9750,6 +10622,12 @@ func (s *CreateKeyInput) SetTags(v []*Tag) *CreateKeyInput {
return s
}
+// SetXksKeyId sets the XksKeyId field's value.
+func (s *CreateKeyInput) SetXksKeyId(v string) *CreateKeyInput {
+ s.XksKeyId = &v
+ return s
+}
+
type CreateKeyOutput struct {
_ struct{} `type:"structure"`
@@ -9854,17 +10732,27 @@ func (s *CustomKeyStoreHasCMKsException) RequestID() string {
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operation is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in a CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
type CustomKeyStoreInvalidStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -10064,39 +10952,53 @@ func (s *CustomKeyStoreNotFoundException) RequestID() string {
type CustomKeyStoresListEntry struct {
_ struct{} `type:"structure"`
- // A unique identifier for the CloudHSM cluster that is associated with the
- // custom key store.
+ // A unique identifier for the CloudHSM cluster that is associated with a CloudHSM
+ // key store. This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.
CloudHsmClusterId *string `min:"19" type:"string"`
// Describes the connection error. This field appears in the response only when
- // the ConnectionState is FAILED. For help resolving these errors, see How to
- // Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
- // in Key Management Service Developer Guide.
+ // the ConnectionState is FAILED.
//
- // Valid values are:
- //
- // * CLUSTER_NOT_FOUND - KMS cannot find the CloudHSM cluster with the specified
- // cluster ID.
+ // Many failures can be resolved by updating the properties of the custom key
+ // store. To update a custom key store, disconnect it (DisconnectCustomKeyStore),
+ // correct the errors (UpdateCustomKeyStore), and try to connect again (ConnectCustomKeyStore).
+ // For additional help resolving these errors, see How to Fix a Connection Failure
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+ // in Key Management Service Developer Guide.
//
- // * INSUFFICIENT_CLOUDHSM_HSMS - The associated CloudHSM cluster does not
- // contain any active HSMs. To connect a custom key store to its CloudHSM
- // cluster, the cluster must contain at least one active HSM.
+ // All custom key stores:
//
- // * INTERNAL_ERROR - KMS could not complete the request due to an internal
+ // * INTERNAL_ERROR — KMS could not complete the request due to an internal
// error. Retry the request. For ConnectCustomKeyStore requests, disconnect
// the custom key store before trying to connect again.
//
- // * INVALID_CREDENTIALS - KMS does not have the correct password for the
- // kmsuser crypto user in the CloudHSM cluster. Before you can connect your
- // custom key store to its CloudHSM cluster, you must change the kmsuser
- // account password and update the key store password value for the custom
- // key store.
+ // * NETWORK_ERRORS — Network errors are preventing KMS from connecting
+ // the custom key store to its backing key store.
//
- // * NETWORK_ERRORS - Network errors are preventing KMS from connecting to
- // the custom key store.
+ // CloudHSM key stores:
+ //
+ // * CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM cluster with the
+ // specified cluster ID.
+ //
+ // * INSUFFICIENT_CLOUDHSM_HSMS — The associated CloudHSM cluster does
+ // not contain any active HSMs. To connect a custom key store to its CloudHSM
+ // cluster, the cluster must contain at least one active HSM.
+ //
+ // * INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private subnet
+ // associated with the CloudHSM cluster doesn't have any available IP addresses.
+ // A CloudHSM key store connection requires one free IP address in each of
+ // the associated private subnets, although two are preferable. For details,
+ // see How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+ // in the Key Management Service Developer Guide.
+ //
+ // * INVALID_CREDENTIALS — The KeyStorePassword for the custom key store
+ // doesn't match the current password of the kmsuser crypto user in the CloudHSM
+ // cluster. Before you can connect your custom key store to its CloudHSM
+ // cluster, you must change the kmsuser account password and update the KeyStorePassword
+ // value for the custom key store.
//
- // * SUBNET_NOT_FOUND - A subnet in the CloudHSM cluster configuration was
- // deleted. If KMS cannot find all of the subnets in the cluster configuration,
+ // * SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration
+ // was deleted. If KMS cannot find all of the subnets in the cluster configuration,
// attempts to connect the custom key store to the CloudHSM cluster fail.
// To fix this error, create a cluster from a recent backup and associate
// it with your custom key store. (This process creates a new cluster configuration
@@ -10104,13 +11006,13 @@ type CustomKeyStoresListEntry struct {
// Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
// in the Key Management Service Developer Guide.
//
- // * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated
+ // * USER_LOCKED_OUT — The kmsuser CU account is locked out of the associated
// CloudHSM cluster due to too many failed password attempts. Before you
// can connect your custom key store to its CloudHSM cluster, you must change
// the kmsuser account password and update the key store password value for
// the custom key store.
//
- // * USER_LOGGED_IN - The kmsuser CU account is logged into the the associated
+ // * USER_LOGGED_IN — The kmsuser CU account is logged into the associated
// CloudHSM cluster. This prevents KMS from rotating the kmsuser account
// password and logging into the cluster. Before you can connect your custom
// key store to its CloudHSM cluster, you must log the kmsuser CU out of
@@ -10119,27 +11021,94 @@ type CustomKeyStoresListEntry struct {
// store. For help, see How to Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
// in the Key Management Service Developer Guide.
//
- // * USER_NOT_FOUND - KMS cannot find a kmsuser CU account in the associated
+ // * USER_NOT_FOUND — KMS cannot find a kmsuser CU account in the associated
// CloudHSM cluster. Before you can connect your custom key store to its
// CloudHSM cluster, you must create a kmsuser CU account in the cluster,
// and then update the key store password value for the custom key store.
+ //
+ // External key stores:
+ //
+ // * INVALID_CREDENTIALS — One or both of the XksProxyAuthenticationCredential
+ // values is not valid on the specified external key store proxy.
+ //
+ // * XKS_PROXY_ACCESS_DENIED — KMS requests are denied access to the external
+ // key store proxy. If the external key store proxy has authorization rules,
+ // verify that they permit KMS to communicate with the proxy on your behalf.
+ //
+ // * XKS_PROXY_INVALID_CONFIGURATION — A configuration error is preventing
+ // the external key store from connecting to its proxy. Verify the value
+ // of the XksProxyUriPath.
+ //
+ // * XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the response from
+ // the external key store proxy. If you see this connection error code repeatedly,
+ // notify your external key store proxy vendor.
+ //
+ // * XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external
+ // key store proxy because the TLS configuration is invalid. Verify that
+ // the XKS proxy supports TLS 1.2 or 1.3. Also, verify that the TLS certificate
+ // is not expired, and that it matches the hostname in the XksProxyUriEndpoint
+ // value, and that it is signed by a certificate authority included in the
+ // Trusted Certificate Authorities (https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities)
+ // list.
+ //
+ // * XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external
+ // key store proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath
+ // are correct. Use the tools for your external key store proxy to verify
+ // that the proxy is active and available on its network. Also, verify that
+ // your external key manager instances are operating properly. Connection
+ // attempts fail with this connection error code if the proxy reports that
+ // all external key manager instances are unavailable.
+ //
+ // * XKS_PROXY_TIMED_OUT — KMS can connect to the external key store proxy,
+ // but the proxy does not respond to KMS in the time allotted. If you see
+ // this connection error code repeatedly, notify your external key store
+ // proxy vendor.
+ //
+ // * XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The Amazon VPC endpoint
+ // service configuration doesn't conform to the requirements for a KMS external
+ // key store. The VPC endpoint service must be an endpoint service for interface
+ // endpoints in the caller's Amazon Web Services account. It must have a
+ // network load balancer (NLB) connected to at least two subnets, each in
+ // a different Availability Zone. The Allow principals list must include
+ // the KMS service principal for the Region, cks.kms.<Region>.amazonaws.com,
+ // such as cks.kms.us-east-1.amazonaws.com. It must not require acceptance
+ // (https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html)
+ // of connection requests. It must have a private DNS name. The private DNS
+ // name for an external key store with VPC_ENDPOINT_SERVICE connectivity
+ // must be unique in its Amazon Web Services Region. The domain of the private
+ // DNS name must have a verification status (https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html)
+ // of verified. The TLS certificate (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html)
+ // specifies the private DNS hostname at which the endpoint is reachable.
+ //
+ // * XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint
+ // service that it uses to communicate with the external key store proxy.
+ // Verify that the XksProxyVpcEndpointServiceName is correct and the KMS
+ // service principal has service consumer permissions on the Amazon VPC endpoint
+ // service.
ConnectionErrorCode *string `type:"string" enum:"ConnectionErrorCodeType"`
- // Indicates whether the custom key store is connected to its CloudHSM cluster.
+ // Indicates whether the custom key store is connected to its backing key store.
+ // For a CloudHSM key store, the ConnectionState indicates whether it is connected
+ // to its CloudHSM cluster. For an external key store, the ConnectionState indicates
+ // whether it is connected to the external key store proxy that communicates
+ // with your external key manager.
//
- // You can create and use KMS keys in your custom key stores only when its connection
- // state is CONNECTED.
+ // You can create and use KMS keys in your custom key store only when its ConnectionState
+ // is CONNECTED.
//
- // The value is DISCONNECTED if the key store has never been connected or you
- // use the DisconnectCustomKeyStore operation to disconnect it. If the value
- // is CONNECTED but you are having trouble using the custom key store, make
- // sure that its associated CloudHSM cluster is active and contains at least
- // one active HSM.
+ // The ConnectionState value is DISCONNECTED only if the key store has never
+ // been connected or you use the DisconnectCustomKeyStore operation to disconnect
+ // it. If the value is CONNECTED but you are having trouble using the custom
+ // key store, make sure that the backing key store is reachable and active.
+ // For a CloudHSM key store, verify that its associated CloudHSM cluster is
+ // active and contains at least one active HSM. For an external key store, verify
+ // that the external key store proxy and external key manager are connected
+ // and enabled.
//
// A value of FAILED indicates that an attempt to connect was unsuccessful.
// The ConnectionErrorCode field in the response indicates the cause of the
- // failure. For help resolving a connection failure, see Troubleshooting a Custom
- // Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+ // failure. For help resolving a connection failure, see Troubleshooting a custom
+ // key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
// in the Key Management Service Developer Guide.
ConnectionState *string `type:"string" enum:"ConnectionStateType"`
@@ -10152,10 +11121,26 @@ type CustomKeyStoresListEntry struct {
// The user-specified friendly name for the custom key store.
CustomKeyStoreName *string `min:"1" type:"string"`
- // The trust anchor certificate of the associated CloudHSM cluster. When you
- // initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+ // Indicates the type of the custom key store. AWS_CLOUDHSM indicates a custom
+ // key store backed by a CloudHSM cluster. EXTERNAL_KEY_STORE indicates a custom
+ // key store backed by an external key store proxy and external key manager
+ // outside of Amazon Web Services.
+ CustomKeyStoreType *string `type:"string" enum:"CustomKeyStoreType"`
+
+ // The trust anchor certificate of the CloudHSM cluster associated with a CloudHSM
+ // key store. When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create this certificate and save it in the customerCA.crt file.
+ //
+ // This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.
TrustAnchorCertificate *string `min:"1" type:"string"`
+
+ // Configuration settings for the external key store proxy (XKS proxy). The
+ // external key store proxy translates KMS requests into a format that your
+ // external key manager can understand. The proxy configuration includes connection
+ // information that KMS requires.
+ //
+ // This field appears only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.
+ XksProxyConfiguration *XksProxyConfigurationType `type:"structure"`
}
// String returns the string representation.
@@ -10212,12 +11197,24 @@ func (s *CustomKeyStoresListEntry) SetCustomKeyStoreName(v string) *CustomKeySto
return s
}
+// SetCustomKeyStoreType sets the CustomKeyStoreType field's value.
+func (s *CustomKeyStoresListEntry) SetCustomKeyStoreType(v string) *CustomKeyStoresListEntry {
+ s.CustomKeyStoreType = &v
+ return s
+}
+
// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value.
func (s *CustomKeyStoresListEntry) SetTrustAnchorCertificate(v string) *CustomKeyStoresListEntry {
s.TrustAnchorCertificate = &v
return s
}
+// SetXksProxyConfiguration sets the XksProxyConfiguration field's value.
+func (s *CustomKeyStoresListEntry) SetXksProxyConfiguration(v *XksProxyConfigurationType) *CustomKeyStoresListEntry {
+ s.XksProxyConfiguration = v
+ return s
+}
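// Example (illustrative sketch, not part of this patch): reading the new
// CustomKeyStoreType, ConnectionState, and ConnectionErrorCode fields of
// CustomKeyStoresListEntry to decide whether a key store needs to be reconnected.
// The key store name and the standard SDK import paths are assumptions for
// illustration only.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{
		CustomKeyStoreName: aws.String("ExampleKeyStore"), // hypothetical friendly name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range out.CustomKeyStores {
		fmt.Printf("%s (%s): %s\n",
			aws.StringValue(entry.CustomKeyStoreName),
			aws.StringValue(entry.CustomKeyStoreType),
			aws.StringValue(entry.ConnectionState))
		// ConnectionErrorCode is populated only when ConnectionState is FAILED.
		if aws.StringValue(entry.ConnectionState) == "FAILED" {
			fmt.Println("  error:", aws.StringValue(entry.ConnectionErrorCode))
		}
	}
}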
+
type DecryptInput struct {
_ struct{} `type:"structure"`
@@ -10643,8 +11640,8 @@ func (s DeleteImportedKeyMaterialOutput) GoString() string {
return s.String()
}
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
type DependencyTimeoutException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -10716,8 +11713,8 @@ type DescribeCustomKeyStoresInput struct {
//
// By default, this operation gets information about all custom key stores in
// the account and Region. To limit the output to a particular custom key store,
- // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter,
- // but not both.
+ // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but
+ // not both.
CustomKeyStoreId *string `min:"1" type:"string"`
// Gets only information about the specified custom key store. Enter the friendly
@@ -10725,8 +11722,8 @@ type DescribeCustomKeyStoresInput struct {
//
// By default, this operation gets information about all custom key stores in
// the account and Region. To limit the output to a particular custom key store,
- // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter,
- // but not both.
+ // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but
+ // not both.
CustomKeyStoreName *string `min:"1" type:"string"`
// Use this parameter to specify the maximum number of items to return. When
@@ -11361,13 +12358,13 @@ func (s EnableKeyOutput) GoString() string {
type EnableKeyRotationInput struct {
_ struct{} `type:"structure"`
- // Identifies a symmetric encryption KMS key. You cannot enable or disable automatic
- // rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+ // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation
+ // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
- // The key rotation status of these KMS keys is always false. To enable or disable
- // automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+ // To enable or disable automatic rotation of a set of related multi-Region
+ // keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// Specify the key ID or key ARN of the KMS key.
@@ -11455,6 +12452,8 @@ type EncryptInput struct {
// This parameter is required only for asymmetric KMS keys. The default value,
// SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys.
// If you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256.
+ //
+ // The SM2PKE algorithm is only available in China Regions.
EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
// Specifies the encryption context that will be used to encrypt the data. An
@@ -11963,8 +12962,7 @@ type GenerateDataKeyPairInput struct {
// encrypt and decrypt or to sign and verify (but not both), and the rule that
// permits you to use ECC KMS keys only to sign and verify, are not effective
// on data key pairs, which are used outside of KMS. The SM2 key spec is only
- // available in China Regions. RSA and ECC asymmetric key pairs are also available
- // in China Regions.
+ // available in China Regions.
//
// KeyPairSpec is a required field
KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"`
@@ -12169,8 +13167,7 @@ type GenerateDataKeyPairWithoutPlaintextInput struct {
// encrypt and decrypt or to sign and verify (but not both), and the rule that
// permits you to use ECC KMS keys only to sign and verify, are not effective
// on data key pairs, which are used outside of KMS. The SM2 key spec is only
- // available in China Regions. RSA and ECC asymmetric key pairs are also available
- // in China Regions.
+ // available in China Regions.
//
// KeyPairSpec is a required field
KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"`
@@ -12592,8 +13589,10 @@ type GenerateMacOutput struct {
// The HMAC KMS key used in the operation.
KeyId *string `min:"1" type:"string"`
- // The hash-based message authentication code (HMAC) for the given message,
- // key, and MAC algorithm.
+ // The hash-based message authentication code (HMAC) that was generated for
+ // the specified message, HMAC KMS key, and MAC algorithm.
+ //
+ // This is the standard, raw HMAC defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
// Mac is automatically base64 encoded/decoded by the SDK.
Mac []byte `min:"1" type:"blob"`
@@ -12641,8 +13640,11 @@ type GenerateRandomInput struct {
_ struct{} `type:"structure"`
// Generates the random byte string in the CloudHSM cluster that is associated
- // with the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
- // To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
+ // with the specified CloudHSM key store. To find the ID of a custom key store,
+ // use the DescribeCustomKeyStores operation.
+ //
+ // External key store IDs are not valid for this parameter. If you specify the
+ // ID of an external key store, GenerateRandom throws an UnsupportedOperationException.
CustomKeyStoreId *string `min:"1" type:"string"`
// The length of the random byte string. This parameter is required.
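// Example (illustrative sketch, not part of this patch): calling GenerateRandom with a
// CustomKeyStoreId, which per the comment above must identify a CloudHSM key store;
// passing an external key store ID fails with UnsupportedOperationException. The store
// ID and the standard SDK import paths are assumptions for illustration only.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateRandom(&kms.GenerateRandomInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"), // hypothetical CloudHSM key store ID
		NumberOfBytes:    aws.Int64(32),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d random bytes\n", len(out.Plaintext))
}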
@@ -12951,11 +13953,17 @@ type GetParametersForImportInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // The algorithm you will use to encrypt the key material before importing it
- // with ImportKeyMaterial. For more information, see Encrypt the Key Material
+ // The algorithm you will use to encrypt the key material before using the ImportKeyMaterial
+ // operation to import it. For more information, see Encrypt the key material
// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html)
// in the Key Management Service Developer Guide.
//
+ // The RSAES_PKCS1_V1_5 wrapping algorithm is deprecated. We recommend that
+ // you begin using a different wrapping algorithm immediately. KMS will end
+ // support for RSAES_PKCS1_V1_5 by October 1, 2023 pursuant to cryptographic
+ // key management guidance (https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-131Ar2.pdf)
+ // from the National Institute of Standards and Technology (NIST).
+ //
// WrappingAlgorithm is a required field
WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"`
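// Example (illustrative sketch, not part of this patch): requesting import parameters
// with RSAES_OAEP_SHA_256 rather than the deprecated RSAES_PKCS1_V1_5 wrapping
// algorithm mentioned above. The key ID and the standard SDK import paths are
// assumptions for illustration only.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GetParametersForImport(&kms.GetParametersForImportInput{
		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical key ID
		WrappingAlgorithm: aws.String("RSAES_OAEP_SHA_256"),
		WrappingKeySpec:   aws.String("RSA_2048"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned import token and wrapping public key are usable until ParametersValidTo.
	fmt.Println("parameters valid until:", aws.TimeValue(out.ParametersValidTo))
}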
@@ -13183,7 +14191,7 @@ type GetPublicKeyOutput struct {
//
// The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend
// that you use the KeySpec field in your code. However, to avoid breaking changes,
- // KMS will support both fields.
+ // KMS supports both fields.
//
// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
@@ -13293,10 +14301,10 @@ func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutp
//
// KMS applies the grant constraints only to cryptographic operations that support
// an encryption context, that is, all cryptographic operations with a symmetric
-// encryption KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks).
+// KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks).
// Grant constraints are not applied to operations that do not support an encryption
-// context, such as cryptographic operations with HMAC KMS keys or asymmetric
-// KMS keys, and management operations, such as DescribeKey or RetireGrant.
+// context, such as cryptographic operations with asymmetric KMS keys and management
+// operations, such as DescribeKey or RetireGrant.
//
// In a cryptographic operation, the encryption context in the decryption operation
// must be an exact, case-sensitive match for the keys and values in the encryption
@@ -13481,9 +14489,15 @@ type ImportKeyMaterialInput struct {
// EncryptedKeyMaterial is a required field
EncryptedKeyMaterial []byte `min:"1" type:"blob" required:"true"`
- // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES,
- // in which case you must include the ValidTo parameter. When this parameter
- // is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.
+ // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES.
+ //
+ // When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify
+ // a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE,
+ // you must omit the ValidTo parameter.
+ //
+ // You cannot change the ExpirationModel or ValidTo values for the current import
+ // after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial)
+ // and reimport the key material.
ExpirationModel *string `type:"string" enum:"ExpirationModelType"`
// The import token that you received in the response to a previous GetParametersForImport
@@ -13514,10 +14528,20 @@ type ImportKeyMaterialInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // The time at which the imported key material expires. When the key material
- // expires, KMS deletes the key material and the KMS key becomes unusable. You
- // must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE.
- // Otherwise it is required.
+ // The date and time when the imported key material expires. This parameter
+ // is required when the value of the ExpirationModel parameter is KEY_MATERIAL_EXPIRES.
+ // Otherwise it is not valid.
+ //
+ // The value of this parameter must be a future date and time. The maximum value
+ // is 365 days from the request date.
+ //
+ // When the key material expires, KMS deletes the key material from the KMS
+ // key. Without its key material, the KMS key is unusable. To use the KMS key
+ // in cryptographic operations, you must reimport the same key material.
+ //
+ // You cannot change the ExpirationModel or ValidTo values for the current import
+ // after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial)
+ // and reimport the key material.
ValidTo *time.Time `type:"timestamp"`
}
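// Example (illustrative sketch, not part of this patch): importing key material with
// ExpirationModel KEY_MATERIAL_EXPIRES, which per the comments above requires a future
// ValidTo no more than 365 days out and cannot be changed without deleting and
// reimporting the material. The key ID, token, wrapped material, and the standard SDK
// import paths are assumptions for illustration only.
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// In a real workflow these come from GetParametersForImport and from wrapping
	// the key material locally with the returned public key.
	var importToken, encryptedKeyMaterial []byte

	_, err := svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
		KeyId:                aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical key ID
		ImportToken:          importToken,
		EncryptedKeyMaterial: encryptedKeyMaterial,
		ExpirationModel:      aws.String("KEY_MATERIAL_EXPIRES"),
		// ValidTo is required with KEY_MATERIAL_EXPIRES.
		ValidTo: aws.Time(time.Now().AddDate(0, 0, 90)),
	})
	if err != nil {
		log.Fatal(err)
	}
}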
@@ -13752,9 +14776,10 @@ func (s *IncorrectKeyMaterialException) RequestID() string {
}
// The request was rejected because the trust anchor certificate in the request
-// is not the trust anchor certificate for the specified CloudHSM cluster.
+// to create a CloudHSM key store is not the trust anchor certificate for the
+// specified CloudHSM cluster.
//
-// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+// When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
type IncorrectTrustAnchorException struct {
@@ -14423,9 +15448,17 @@ func (s *InvalidMarkerException) RequestID() string {
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
+// This exception means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide .
+//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
type InvalidStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -14664,8 +15697,8 @@ func (s *KeyListEntry) SetKeyId(v string) *KeyListEntry {
// Contains metadata about a KMS key.
//
-// This data type is used as a response element for the CreateKey and DescribeKey
-// operations.
+// This data type is used as a response element for the CreateKey, DescribeKey,
+// and ReplicateKey operations.
type KeyMetadata struct {
_ struct{} `type:"structure"`
@@ -14679,16 +15712,17 @@ type KeyMetadata struct {
Arn *string `min:"20" type:"string"`
// The cluster ID of the CloudHSM cluster that contains the key material for
- // the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
+ // the KMS key. When you create a KMS key in a CloudHSM custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
// KMS creates the key material for the KMS key in the associated CloudHSM cluster.
- // This value is present only when the KMS key is created in a custom key store.
+ // This field is present only when the KMS key is created in a CloudHSM key
+ // store.
CloudHsmClusterId *string `min:"19" type:"string"`
// The date and time when the KMS key was created.
CreationDate *time.Time `type:"timestamp"`
// A unique identifier for the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // that contains the KMS key. This value is present only when the KMS key is
+ // that contains the KMS key. This field is present only when the KMS key is
// created in a custom key store.
CustomKeyStoreId *string `min:"1" type:"string"`
@@ -14696,7 +15730,7 @@ type KeyMetadata struct {
//
// The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend
// that you use the KeySpec field in your code. However, to avoid breaking changes,
- // KMS will support both fields.
+ // KMS supports both fields.
//
// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
@@ -14814,6 +15848,13 @@ type KeyMetadata struct {
// value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel
// is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.
ValidTo *time.Time `type:"timestamp"`
+
+ // Information about the external key that is associated with a KMS key in an
+ // external key store.
+ //
+ // For more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+ // in the Key Management Service Developer Guide.
+ XksKeyConfiguration *XksKeyConfigurationType `type:"structure"`
}
// String returns the string representation.
@@ -14972,6 +16013,12 @@ func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata {
return s
}
+// SetXksKeyConfiguration sets the XksKeyConfiguration field's value.
+func (s *KeyMetadata) SetXksKeyConfiguration(v *XksKeyConfigurationType) *KeyMetadata {
+ s.XksKeyConfiguration = v
+ return s
+}
+
// The request was rejected because the specified KMS key was not available.
// You can retry the request.
type KeyUnavailableException struct {
@@ -15781,7 +16828,7 @@ type ListResourceTagsOutput struct {
// A list of tags. Each tag consists of a tag key and a tag value.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
Tags []*Tag `type:"list"`
@@ -15848,11 +16895,10 @@ type ListRetirableGrantsInput struct {
// Amazon Web Services account.
//
// To specify the retiring principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, federated users,
- // and assumed role users. For examples of the ARN syntax for specifying a principal,
- // see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // of an Amazon Web Services principal. Valid principals include Amazon Web
+ // Services accounts, IAM users, IAM roles, federated users, and assumed role
+ // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// RetiringPrincipal is a required field
RetiringPrincipal *string `min:"1" type:"string" required:"true"`
@@ -16146,19 +17192,18 @@ func (s *NotFoundException) RequestID() string {
type PutKeyPolicyInput struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
//
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
// Use this parameter only when you intend to prevent the principal that is
// making the request from making a subsequent PutKeyPolicy request on the KMS
// key.
- //
- // The default value is false.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
// Sets the key policy on the specified KMS key.
@@ -16180,20 +17225,19 @@ type PutKeyPolicyInput struct {
//
// The key policy must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy
- // must allow the principal that is making the PutKeyPolicy request to make
- // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk
- // that the KMS key becomes unmanageable. For more information, refer to
- // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide.
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
// in the Amazon Web Services Identity and Access Management User Guide.
//
// A key policy document can include only the following characters:
@@ -16208,7 +17252,7 @@ type PutKeyPolicyInput struct {
// characters
//
// For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
- // in the Key Management Service Developer Guide. For help writing and formatting
+ // in the Key Management Service Developer Guide. For help writing and formatting
// a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
// in the Identity and Access Management User Guide .
//
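// Example (illustrative sketch, not part of this patch): setting a key policy while
// leaving BypassPolicyLockoutSafetyCheck at its default of false, so the policy must
// allow the calling principal to make a subsequent PutKeyPolicy request, as described
// above. The account ID, key ID, policy text, and the standard SDK import paths are
// assumptions for illustration only.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// A minimal policy that delegates access to IAM in a (hypothetical) account,
	// which keeps the caller able to update the policy later.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "EnableIAMPolicies",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "kms:*",
	    "Resource": "*"
	  }]
	}`

	_, err := svc.PutKeyPolicy(&kms.PutKeyPolicyInput{
		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical key ID
		PolicyName: aws.String("default"),
		Policy:     aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}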
@@ -16606,19 +17650,18 @@ func (s *ReEncryptOutput) SetSourceKeyId(v string) *ReEncryptOutput {
type ReplicateKeyInput struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
//
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
// Use this parameter only when you intend to prevent the principal that is
// making the request from making a subsequent PutKeyPolicy request on the KMS
// key.
- //
- // The default value is false.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
// A description of the KMS key. The default value is an empty string (no description).
@@ -16655,20 +17698,20 @@ type ReplicateKeyInput struct {
//
// If you provide a key policy, it must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy
- // must give the caller kms:PutKeyPolicy permission on the replica key. This
- // reduces the risk that the KMS key becomes unmanageable. For more information,
- // refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide .
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
- // in the Identity and Access Management User Guide .
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // in the Amazon Web Services Identity and Access Management User Guide.
//
// A key policy document can include only the following characters:
//
@@ -16720,7 +17763,7 @@ type ReplicateKeyInput struct {
// operation.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
@@ -17278,10 +18321,10 @@ type SignInput struct {
KeyId *string `min:"1" type:"string" required:"true"`
// Specifies the message or message digest to sign. Messages can be 0-4096 bytes.
- // To sign a larger message, provide the message digest.
+ // To sign a larger message, provide a message digest.
//
- // If you provide a message, KMS generates a hash digest of the message and
- // then signs it.
+ // If you provide a message digest, use the DIGEST value of MessageType to prevent
+ // the digest from being hashed again while signing.
//
// Message is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by SignInput's
@@ -17292,15 +18335,44 @@ type SignInput struct {
// Message is a required field
Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"`
- // Tells KMS whether the value of the Message parameter is a message or message
- // digest. The default value, RAW, indicates a message. To indicate a message
- // digest, enter DIGEST.
+ // Tells KMS whether the value of the Message parameter should be hashed as
+ // part of the signing algorithm. Use RAW for unhashed messages; use DIGEST
+ // for message digests, which are already hashed.
+ //
+ // When the value of MessageType is RAW, KMS uses the standard signing algorithm,
+ // which begins with a hash function. When the value is DIGEST, KMS skips the
+ // hashing step in the signing algorithm.
+ //
+ // Use the DIGEST value only when the value of the Message parameter is a message
+ // digest. If you use the DIGEST value with an unhashed message, the security
+ // of the signing operation can be compromised.
+ //
+ // When the value of MessageType is DIGEST, the length of the Message value must
+ // match the length of hashed messages for the specified signing algorithm.
+ //
+ // You can submit a message digest and omit the MessageType or specify RAW so
+ // the digest is hashed again while signing. However, this can cause verification
+ // failures when verifying with a system that assumes a single hash.
+ //
+ // The hashing algorithm that Sign uses is based on the SigningAlgorithm
+ // value.
+ //
+ // * Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.
+ //
+ // * SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification
+ // with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
MessageType *string `type:"string" enum:"MessageType"`
// Specifies the signing algorithm to use when signing the message.
//
// Choose an algorithm that is compatible with the type and size of the specified
- // asymmetric KMS key.
+ // asymmetric KMS key. When signing with RSA key pairs, RSASSA-PSS algorithms
+ // are preferred. We include RSASSA-PKCS1-v1_5 algorithms for compatibility
+ // with existing applications.
//
// SigningAlgorithm is a required field
SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"`
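// Example (illustrative sketch, not part of this patch): signing a pre-computed
// SHA-256 digest with MessageType DIGEST so KMS skips its own hashing step, as the
// MessageType comments above explain. The key ID and the standard SDK import paths
// are assumptions for illustration only.
package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Hash the message locally; the digest length must match the signing algorithm
	// (SHA-256 here for RSASSA_PSS_SHA_256).
	digest := sha256.Sum256([]byte("message to sign"))

	out, err := svc.Sign(&kms.SignInput{
		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical asymmetric key ID
		Message:          digest[:],
		MessageType:      aws.String("DIGEST"),
		SigningAlgorithm: aws.String("RSASSA_PSS_SHA_256"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature is %d bytes\n", len(out.Signature))
}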
@@ -17863,8 +18935,8 @@ type UpdateAliasInput struct {
//
// The KMS key must be in the same Amazon Web Services account and Region as
// the alias. Also, the new target KMS key must be the same type as the current
- // target KMS key (both symmetric or both asymmetric) and they must have the
- // same key usage.
+ // target KMS key (both symmetric or both asymmetric or both HMAC) and they
+ // must have the same key usage.
//
// Specify the key ID or key ARN of the KMS key.
//
@@ -17959,7 +19031,8 @@ func (s UpdateAliasOutput) GoString() string {
type UpdateCustomKeyStoreInput struct {
_ struct{} `type:"structure"`
- // Associates the custom key store with a related CloudHSM cluster.
+ // Associates the custom key store with a related CloudHSM cluster. This parameter
+ // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
//
// Enter the cluster ID of the cluster that you used to create the custom key
// store or a cluster that shares a backup history and has the same cluster
@@ -17969,6 +19042,8 @@ type UpdateCustomKeyStoreInput struct {
// for a cluster associated with a custom key store. To view the cluster certificate
// of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
+ //
+ // To change this value, the CloudHSM key store must be disconnected.
CloudHsmClusterId *string `min:"19" type:"string"`
// Identifies the custom key store that you want to update. Enter the ID of
@@ -17979,12 +19054,15 @@ type UpdateCustomKeyStoreInput struct {
CustomKeyStoreId *string `min:"1" type:"string" required:"true"`
// Enter the current password of the kmsuser crypto user (CU) in the CloudHSM
- // cluster that is associated with the custom key store.
+ // cluster that is associated with the custom key store. This parameter is valid
+ // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
//
// This parameter tells KMS the current password of the kmsuser crypto user
// (CU). It does not set or change the password of any users in the CloudHSM
// cluster.
//
+ // To change this value, the CloudHSM key store must be disconnected.
+ //
// KeyStorePassword is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UpdateCustomKeyStoreInput's
// String and GoString methods.
@@ -17992,7 +19070,82 @@ type UpdateCustomKeyStoreInput struct {
// Changes the friendly name of the custom key store to the value that you specify.
// The custom key store name must be unique in the Amazon Web Services account.
+ //
+ // To change this value, a CloudHSM key store must be disconnected. An external
+ // key store can be connected or disconnected.
NewCustomKeyStoreName *string `min:"1" type:"string"`
+
+ // Changes the credentials that KMS uses to sign requests to the external key
+ // store proxy (XKS proxy). This parameter is valid only for custom key stores
+ // with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // You must specify both the AccessKeyId and SecretAccessKey value in the authentication
+ // credential, even if you are only updating one value.
+ //
+ // This parameter doesn't establish or change your authentication credentials
+ // on the proxy. It just tells KMS the credential that you established with
+ // your external key store proxy. For example, if you rotate the credential
+ // on your external key store proxy, you can use this parameter to update the
+ // credential in KMS.
+ //
+ // You can change this value when the external key store is connected or disconnected.
+ XksProxyAuthenticationCredential *XksProxyAuthenticationCredentialType `type:"structure"`
+
+ // Changes the connectivity setting for the external key store. To indicate
+ // that the external key store proxy uses an Amazon VPC endpoint service to communicate
+ // with KMS, specify VPC_ENDPOINT_SERVICE. Otherwise, specify PUBLIC_ENDPOINT.
+ //
+ // If you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE, you must
+ // also change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName
+ // value.
+ //
+ // If you change the XksProxyConnectivity to PUBLIC_ENDPOINT, you must also
+ // change the XksProxyUriEndpoint and specify a null or empty string for the
+ // XksProxyVpcEndpointServiceName value.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyConnectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // Changes the URI endpoint that KMS uses to connect to your external key store
+ // proxy (XKS proxy). This parameter is valid only for custom key stores with
+ // a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // For external key stores with an XksProxyConnectivity value of PUBLIC_ENDPOINT,
+ // the protocol must be HTTPS.
+ //
+ // For external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE,
+ // specify https:// followed by the private DNS name associated with the VPC
+ // endpoint service. Each external key store must use a different private DNS
+ // name.
+ //
+ // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique
+ // in the Amazon Web Services account and Region.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyUriEndpoint *string `min:"10" type:"string"`
+
+ // Changes the base path to the proxy APIs for this external key store. To find
+ // this value, see the documentation for your external key manager and external
+ // key store proxy (XKS proxy). This parameter is valid only for custom key
+ // stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // The value must start with / and must end with /kms/xks/v1, where v1 represents
+ // the version of the KMS external key store proxy API. You can include an optional
+ // prefix between the required elements such as /example/kms/xks/v1.
+ //
+ // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique
+ // in the Amazon Web Services account and Region.
+ //
+ // You can change this value when the external key store is connected or disconnected.
+ XksProxyUriPath *string `min:"10" type:"string"`
+
+ // Changes the name that KMS uses to identify the Amazon VPC endpoint service
+ // for your external key store proxy (XKS proxy). This parameter is valid when
+ // the CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity
+ // is VPC_ENDPOINT_SERVICE.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyVpcEndpointServiceName *string `min:"20" type:"string"`
}
// String returns the string representation.
@@ -18031,6 +19184,20 @@ func (s *UpdateCustomKeyStoreInput) Validate() error {
if s.NewCustomKeyStoreName != nil && len(*s.NewCustomKeyStoreName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NewCustomKeyStoreName", 1))
}
+ if s.XksProxyUriEndpoint != nil && len(*s.XksProxyUriEndpoint) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriEndpoint", 10))
+ }
+ if s.XksProxyUriPath != nil && len(*s.XksProxyUriPath) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriPath", 10))
+ }
+ if s.XksProxyVpcEndpointServiceName != nil && len(*s.XksProxyVpcEndpointServiceName) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyVpcEndpointServiceName", 20))
+ }
+ if s.XksProxyAuthenticationCredential != nil {
+ if err := s.XksProxyAuthenticationCredential.Validate(); err != nil {
+ invalidParams.AddNested("XksProxyAuthenticationCredential", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -18062,6 +19229,36 @@ func (s *UpdateCustomKeyStoreInput) SetNewCustomKeyStoreName(v string) *UpdateCu
return s
}
+// SetXksProxyAuthenticationCredential sets the XksProxyAuthenticationCredential field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyAuthenticationCredential(v *XksProxyAuthenticationCredentialType) *UpdateCustomKeyStoreInput {
+ s.XksProxyAuthenticationCredential = v
+ return s
+}
+
+// SetXksProxyConnectivity sets the XksProxyConnectivity field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyConnectivity(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyConnectivity = &v
+ return s
+}
+
+// SetXksProxyUriEndpoint sets the XksProxyUriEndpoint field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyUriEndpoint(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyUriEndpoint = &v
+ return s
+}
+
+// SetXksProxyUriPath sets the XksProxyUriPath field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyUriPath(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyUriPath = &v
+ return s
+}
+
+// SetXksProxyVpcEndpointServiceName sets the XksProxyVpcEndpointServiceName field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyVpcEndpointServiceName(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyVpcEndpointServiceName = &v
+ return s
+}
+
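// Example (illustrative sketch, not part of this patch): updating the XksProxyUriPath
// of an external key store. Per the comments above, the path must start with / and end
// with /kms/xks/v1, and it can be changed while the store is connected or disconnected.
// The store ID, path prefix, and the standard SDK import paths are assumptions for
// illustration only.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.UpdateCustomKeyStore(&kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"), // hypothetical external key store ID
		XksProxyUriPath:  aws.String("/example/kms/xks/v1"),   // optional prefix before the required /kms/xks/v1
	})
	if err != nil {
		log.Fatal(err)
	}
}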
type UpdateCustomKeyStoreOutput struct {
_ struct{} `type:"structure"`
}
@@ -18337,13 +19534,37 @@ type VerifyInput struct {
// Message is a required field
Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"`
- // Tells KMS whether the value of the Message parameter is a message or message
- // digest. The default value, RAW, indicates a message. To indicate a message
- // digest, enter DIGEST.
+ // Tells KMS whether the value of the Message parameter should be hashed as
+ // part of the signing algorithm. Use RAW for unhashed messages; use DIGEST
+ // for message digests, which are already hashed.
+ //
+ // When the value of MessageType is RAW, KMS uses the standard signing algorithm,
+ // which begins with a hash function. When the value is DIGEST, KMS skips the
+ // hashing step in the signing algorithm.
//
// Use the DIGEST value only when the value of the Message parameter is a message
- // digest. If you use the DIGEST value with a raw message, the security of the
- // verification operation can be compromised.
+ // digest. If you use the DIGEST value with an unhashed message, the security
+ // of the verification operation can be compromised.
+ //
+ // When the value of MessageType is DIGEST, the length of the Message value must
+ // match the length of hashed messages for the specified signing algorithm.
+ //
+ // You can submit a message digest and omit the MessageType or specify RAW so
+ // the digest is hashed again while signing. However, if the signed message
+ // is hashed once while signing, but twice while verifying, verification fails,
+ // even when the message hasn't changed.
+ //
+ // The hashing algorithm that Verify uses is based on the SigningAlgorithm
+ // value.
+ //
+ // * Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.
+ //
+ // * SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification
+ // with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
MessageType *string `type:"string" enum:"MessageType"`
// The signature that the Sign operation generated.
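// Example (illustrative sketch, not part of this patch): verifying a signature over a
// pre-computed SHA-256 digest with MessageType DIGEST, so the digest is not hashed a
// second time during verification, which the comments above warn would make
// verification fail. The key ID, signature source, and the standard SDK import paths
// are assumptions for illustration only.
package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	digest := sha256.Sum256([]byte("message to sign"))
	var signature []byte // produced earlier by Sign with MessageType DIGEST

	out, err := svc.Verify(&kms.VerifyInput{
		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical asymmetric key ID
		Message:          digest[:],
		MessageType:      aws.String("DIGEST"),
		Signature:        signature,
		SigningAlgorithm: aws.String("RSASSA_PSS_SHA_256"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature valid:", aws.BoolValue(out.SignatureValid))
}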
@@ -18683,47 +19904,1069 @@ func (s *VerifyOutput) SetSigningAlgorithm(v string) *VerifyOutput {
return s
}
-const (
- // AlgorithmSpecRsaesPkcs1V15 is a AlgorithmSpec enum value
- AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5"
+// The request was rejected because the XksKeyId is already associated with
+// a KMS key in this external key store. Each KMS key in an external key store
+// must be associated with a different external key.
+type XksKeyAlreadyInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
- // AlgorithmSpecRsaesOaepSha1 is a AlgorithmSpec enum value
- AlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1"
+ Message_ *string `locationName:"message" type:"string"`
+}
- // AlgorithmSpecRsaesOaepSha256 is a AlgorithmSpec enum value
- AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256"
-)
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyAlreadyInUseException) String() string {
+ return awsutil.Prettify(s)
+}
-// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum
-func AlgorithmSpec_Values() []string {
- return []string{
- AlgorithmSpecRsaesPkcs1V15,
- AlgorithmSpecRsaesOaepSha1,
- AlgorithmSpecRsaesOaepSha256,
- }
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyAlreadyInUseException) GoString() string {
+ return s.String()
}
-const (
- // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS"
+func newErrorXksKeyAlreadyInUseException(v protocol.ResponseMetadata) error {
+ return &XksKeyAlreadyInUseException{
+ RespMetadata: v,
+ }
+}
- // ConnectionErrorCodeTypeClusterNotFound is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeClusterNotFound = "CLUSTER_NOT_FOUND"
+// Code returns the exception type name.
+func (s *XksKeyAlreadyInUseException) Code() string {
+ return "XksKeyAlreadyInUseException"
+}
- // ConnectionErrorCodeTypeNetworkErrors is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeNetworkErrors = "NETWORK_ERRORS"
+// Message returns the exception's message.
+func (s *XksKeyAlreadyInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
- // ConnectionErrorCodeTypeInternalError is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeInternalError = "INTERNAL_ERROR"
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyAlreadyInUseException) OrigErr() error {
+ return nil
+}
- // ConnectionErrorCodeTypeInsufficientCloudhsmHsms is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeInsufficientCloudhsmHsms = "INSUFFICIENT_CLOUDHSM_HSMS"
+func (s *XksKeyAlreadyInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
- // ConnectionErrorCodeTypeUserLockedOut is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeUserLockedOut = "USER_LOCKED_OUT"
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyAlreadyInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
- // ConnectionErrorCodeTypeUserNotFound is a ConnectionErrorCodeType enum value
- ConnectionErrorCodeTypeUserNotFound = "USER_NOT_FOUND"
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyAlreadyInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Information about the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) that
+// is associated with a KMS key in an external key store.
+//
+// This element appears in a CreateKey or DescribeKey response only for a KMS
+// key in an external key store.
+//
+// The external key is a symmetric encryption key that is hosted by an external
+// key manager outside of Amazon Web Services. When you use the KMS key in an
+// external key store in a cryptographic operation, the cryptographic operation
+// is performed in the external key manager using the specified external key.
+// For more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+// in the Key Management Service Developer Guide.
+type XksKeyConfigurationType struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the external key in its external key manager. This is the ID that
+ // the external key store proxy uses to identify the external key.
+ Id *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyConfigurationType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyConfigurationType) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *XksKeyConfigurationType) SetId(v string) *XksKeyConfigurationType {
+ s.Id = &v
+ return s
+}
+
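XksKeyConfigurationType above only appears in CreateKey and DescribeKey responses for KMS keys in an external key store. A small sketch, assuming KeyMetadata exposes an XksKeyConfiguration field in this SDK version; the key ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	})
	if err != nil {
		log.Fatal(err)
	}

	// XksKeyConfiguration is only populated for KMS keys backed by an external key store.
	if out.KeyMetadata != nil && out.KeyMetadata.XksKeyConfiguration != nil {
		fmt.Println("external key ID:", aws.StringValue(out.KeyMetadata.XksKeyConfiguration.Id))
	} else {
		fmt.Println("key is not backed by an external key store")
	}
}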
+// The request was rejected because the external key specified by the XksKeyId
+// parameter did not meet the configuration requirements for an external key
+// store.
+//
+// The external key must be an AES-256 symmetric key that is enabled and performs
+// encryption and decryption.
+type XksKeyInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksKeyInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksKeyInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksKeyInvalidConfigurationException) Code() string {
+ return "XksKeyInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksKeyInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksKeyInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the external key store proxy could not find
+// the external key. This exception is thrown when the value of the XksKeyId
+// parameter doesn't identify a key in the external key manager associated with
+// the external key proxy.
+//
+// Verify that the XksKeyId represents an existing key in the external key manager.
+// Use the key identifier that the external key store proxy uses to identify
+// the key. For details, see the documentation provided with your external key
+// store proxy or key manager.
+type XksKeyNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksKeyNotFoundException(v protocol.ResponseMetadata) error {
+ return &XksKeyNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksKeyNotFoundException) Code() string {
+ return "XksKeyNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *XksKeyNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *XksKeyNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS uses the authentication credential to sign requests that it sends to
+// the external key store proxy (XKS proxy) on your behalf. You establish these
+// credentials on your external key store proxy and report them to KMS.
+//
+// The XksProxyAuthenticationCredential includes two required elements.
+type XksProxyAuthenticationCredentialType struct {
+ _ struct{} `type:"structure"`
+
+ // A unique identifier for the raw secret access key.
+ //
+ // AccessKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyAuthenticationCredentialType's
+ // String and GoString methods.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"20" type:"string" required:"true" sensitive:"true"`
+
+ // A secret string of 43-64 characters. Valid characters are a-z, A-Z, 0-9,
+ // /, +, and =.
+ //
+ // RawSecretAccessKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyAuthenticationCredentialType's
+ // String and GoString methods.
+ //
+ // RawSecretAccessKey is a required field
+ RawSecretAccessKey *string `min:"43" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyAuthenticationCredentialType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyAuthenticationCredentialType) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *XksProxyAuthenticationCredentialType) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "XksProxyAuthenticationCredentialType"}
+ if s.AccessKeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+ }
+ if s.AccessKeyId != nil && len(*s.AccessKeyId) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 20))
+ }
+ if s.RawSecretAccessKey == nil {
+ invalidParams.Add(request.NewErrParamRequired("RawSecretAccessKey"))
+ }
+ if s.RawSecretAccessKey != nil && len(*s.RawSecretAccessKey) < 43 {
+ invalidParams.Add(request.NewErrParamMinLen("RawSecretAccessKey", 43))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *XksProxyAuthenticationCredentialType) SetAccessKeyId(v string) *XksProxyAuthenticationCredentialType {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetRawSecretAccessKey sets the RawSecretAccessKey field's value.
+func (s *XksProxyAuthenticationCredentialType) SetRawSecretAccessKey(v string) *XksProxyAuthenticationCredentialType {
+ s.RawSecretAccessKey = &v
+ return s
+}
+
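Both credential elements above are required and length-checked; Validate surfaces violations client-side before any request is sent. A minimal sketch with deliberately fake placeholder values.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Placeholder values only: 22-character access key ID, 43-character raw secret.
	cred := (&kms.XksProxyAuthenticationCredentialType{}).
		SetAccessKeyId("ABCDE12345ABCDE12345AB").
		SetRawSecretAccessKey("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN012")

	if err := cred.Validate(); err != nil {
		fmt.Println("credential rejected client-side:", err)
		return
	}
	fmt.Println("credential passes client-side validation")
}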
+// Detailed information about the external key store proxy (XKS proxy). Your
+// external key store proxy translates KMS requests into a format that your
+// external key manager can understand. These fields appear in a DescribeCustomKeyStores
+// response only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.
+type XksProxyConfigurationType struct {
+ _ struct{} `type:"structure"`
+
+ // The part of the external key store proxy authentication credential (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential)
+ // that uniquely identifies the secret access key.
+ //
+ // AccessKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyConfigurationType's
+ // String and GoString methods.
+ AccessKeyId *string `min:"20" type:"string" sensitive:"true"`
+
+ // Indicates whether the external key store proxy uses a public endpoint or
+ // an Amazon VPC endpoint service to communicate with KMS.
+ Connectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // The URI endpoint for the external key store proxy.
+ //
+ // If the external key store proxy has a public endpoint, it is displayed here.
+ //
+ // If the external key store proxy uses an Amazon VPC endpoint service name,
+ // this field displays the private DNS name associated with the VPC endpoint
+ // service.
+ UriEndpoint *string `min:"10" type:"string"`
+
+ // The path to the external key store proxy APIs.
+ UriPath *string `min:"10" type:"string"`
+
+ // The Amazon VPC endpoint service used to communicate with the external key
+ // store proxy. This field appears only when the external key store proxy uses
+ // an Amazon VPC endpoint service to communicate with KMS.
+ VpcEndpointServiceName *string `min:"20" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyConfigurationType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyConfigurationType) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *XksProxyConfigurationType) SetAccessKeyId(v string) *XksProxyConfigurationType {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetConnectivity sets the Connectivity field's value.
+func (s *XksProxyConfigurationType) SetConnectivity(v string) *XksProxyConfigurationType {
+ s.Connectivity = &v
+ return s
+}
+
+// SetUriEndpoint sets the UriEndpoint field's value.
+func (s *XksProxyConfigurationType) SetUriEndpoint(v string) *XksProxyConfigurationType {
+ s.UriEndpoint = &v
+ return s
+}
+
+// SetUriPath sets the UriPath field's value.
+func (s *XksProxyConfigurationType) SetUriPath(v string) *XksProxyConfigurationType {
+ s.UriPath = &v
+ return s
+}
+
+// SetVpcEndpointServiceName sets the VpcEndpointServiceName field's value.
+func (s *XksProxyConfigurationType) SetVpcEndpointServiceName(v string) *XksProxyConfigurationType {
+ s.VpcEndpointServiceName = &v
+ return s
+}
+
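XksProxyConfigurationType above is read back from DescribeCustomKeyStores for external key stores. A sketch, assuming CustomKeyStoresListEntry carries an XksProxyConfiguration field in this SDK version; the key store name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{
		CustomKeyStoreName: aws.String("example-external-key-store"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, store := range out.CustomKeyStores {
		cfg := store.XksProxyConfiguration
		if cfg == nil {
			continue // not an external key store
		}
		fmt.Printf("connectivity=%s endpoint=%s path=%s\n",
			aws.StringValue(cfg.Connectivity),
			aws.StringValue(cfg.UriEndpoint),
			aws.StringValue(cfg.UriPath))
	}
}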
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+type XksProxyIncorrectAuthenticationCredentialException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyIncorrectAuthenticationCredentialException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyIncorrectAuthenticationCredentialException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyIncorrectAuthenticationCredentialException(v protocol.ResponseMetadata) error {
+ return &XksProxyIncorrectAuthenticationCredentialException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyIncorrectAuthenticationCredentialException) Code() string {
+ return "XksProxyIncorrectAuthenticationCredentialException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyIncorrectAuthenticationCredentialException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyIncorrectAuthenticationCredentialException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyIncorrectAuthenticationCredentialException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyIncorrectAuthenticationCredentialException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyIncorrectAuthenticationCredentialException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message.
+type XksProxyInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksProxyInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyInvalidConfigurationException) Code() string {
+ return "XksProxyInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+type XksProxyInvalidResponseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidResponseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidResponseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyInvalidResponseException(v protocol.ResponseMetadata) error {
+ return &XksProxyInvalidResponseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyInvalidResponseException) Code() string {
+ return "XksProxyInvalidResponseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyInvalidResponseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyInvalidResponseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyInvalidResponseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyInvalidResponseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyInvalidResponseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with an external key store in the Amazon Web Services account and Region. Each
+// external key store in an account and Region must use a unique external key
+// store proxy address.
+type XksProxyUriEndpointInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriEndpointInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriEndpointInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriEndpointInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriEndpointInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriEndpointInUseException) Code() string {
+ return "XksProxyUriEndpointInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriEndpointInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriEndpointInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriEndpointInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriEndpointInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriEndpointInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with an external key store in the
+// Amazon Web Services account and Region. Each external key store in an account
+// and Region must use a unique external key store proxy API address.
+type XksProxyUriInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriInUseException) Code() string {
+ return "XksProxyUriInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+type XksProxyUriUnreachableException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriUnreachableException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriUnreachableException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriUnreachableException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriUnreachableException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriUnreachableException) Code() string {
+ return "XksProxyUriUnreachableException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriUnreachableException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriUnreachableException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriUnreachableException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriUnreachableException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriUnreachableException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
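Each of these modeled exceptions implements the awserr.Error surface (Code, Message, StatusCode), so callers can branch on the concrete type. A small classification sketch using only exception types defined in this file; the synthetic error in main stands in for whatever CreateCustomKeyStore or UpdateCustomKeyStore actually returns.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// describeXksFailure maps XKS proxy exceptions to short, operator-friendly messages.
// Real code would pass the error returned by CreateCustomKeyStore, UpdateCustomKeyStore,
// or ConnectCustomKeyStore.
func describeXksFailure(err error) string {
	var unreachable *kms.XksProxyUriUnreachableException
	var badCred *kms.XksProxyIncorrectAuthenticationCredentialException
	var badResp *kms.XksProxyInvalidResponseException
	switch {
	case errors.As(err, &unreachable):
		return "XKS proxy URI unreachable: " + unreachable.Message()
	case errors.As(err, &badCred):
		return "XKS proxy rejected the KMS authentication credential: " + badCred.Message()
	case errors.As(err, &badResp):
		return "XKS proxy returned a response KMS could not parse: " + badResp.Message()
	default:
		return err.Error()
	}
}

func main() {
	// Synthetic error, just to exercise the classifier.
	err := &kms.XksProxyUriUnreachableException{Message_: aws.String("proxy did not answer the health check")}
	fmt.Println(describeXksFailure(err))
}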
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with an external key store in the Amazon Web Services
+// account and Region. Each external key store in an Amazon Web Services account
+// and Region must use a different Amazon VPC endpoint service.
+type XksProxyVpcEndpointServiceInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceInUseException) Code() string {
+ return "XksProxyVpcEndpointServiceInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store proxy. For details,
+// see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+type XksProxyVpcEndpointServiceInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Code() string {
+ return "XksProxyVpcEndpointServiceInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+type XksProxyVpcEndpointServiceNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceNotFoundException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceNotFoundException) Code() string {
+ return "XksProxyVpcEndpointServiceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+const (
+ // AlgorithmSpecRsaesPkcs1V15 is a AlgorithmSpec enum value
+ AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5"
+
+ // AlgorithmSpecRsaesOaepSha1 is a AlgorithmSpec enum value
+ AlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1"
+
+ // AlgorithmSpecRsaesOaepSha256 is a AlgorithmSpec enum value
+ AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256"
+)
+
+// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum
+func AlgorithmSpec_Values() []string {
+ return []string{
+ AlgorithmSpecRsaesPkcs1V15,
+ AlgorithmSpecRsaesOaepSha1,
+ AlgorithmSpecRsaesOaepSha256,
+ }
+}
+
+const (
+ // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS"
+
+ // ConnectionErrorCodeTypeClusterNotFound is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeClusterNotFound = "CLUSTER_NOT_FOUND"
+
+ // ConnectionErrorCodeTypeNetworkErrors is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeNetworkErrors = "NETWORK_ERRORS"
+
+ // ConnectionErrorCodeTypeInternalError is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeInternalError = "INTERNAL_ERROR"
+
+ // ConnectionErrorCodeTypeInsufficientCloudhsmHsms is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeInsufficientCloudhsmHsms = "INSUFFICIENT_CLOUDHSM_HSMS"
+
+ // ConnectionErrorCodeTypeUserLockedOut is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeUserLockedOut = "USER_LOCKED_OUT"
+
+ // ConnectionErrorCodeTypeUserNotFound is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeUserNotFound = "USER_NOT_FOUND"
// ConnectionErrorCodeTypeUserLoggedIn is a ConnectionErrorCodeType enum value
ConnectionErrorCodeTypeUserLoggedIn = "USER_LOGGED_IN"
@@ -18733,6 +20976,30 @@ const (
// ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet is a ConnectionErrorCodeType enum value
ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET"
+
+ // ConnectionErrorCodeTypeXksProxyAccessDenied is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyAccessDenied = "XKS_PROXY_ACCESS_DENIED"
+
+ // ConnectionErrorCodeTypeXksProxyNotReachable is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyNotReachable = "XKS_PROXY_NOT_REACHABLE"
+
+ // ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound = "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidResponse is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidResponse = "XKS_PROXY_INVALID_RESPONSE"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidConfiguration = "XKS_PROXY_INVALID_CONFIGURATION"
+
+ // ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration = "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION"
+
+ // ConnectionErrorCodeTypeXksProxyTimedOut is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyTimedOut = "XKS_PROXY_TIMED_OUT"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration = "XKS_PROXY_INVALID_TLS_CONFIGURATION"
)
// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum
@@ -18748,6 +21015,14 @@ func ConnectionErrorCodeType_Values() []string {
ConnectionErrorCodeTypeUserLoggedIn,
ConnectionErrorCodeTypeSubnetNotFound,
ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet,
+ ConnectionErrorCodeTypeXksProxyAccessDenied,
+ ConnectionErrorCodeTypeXksProxyNotReachable,
+ ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound,
+ ConnectionErrorCodeTypeXksProxyInvalidResponse,
+ ConnectionErrorCodeTypeXksProxyInvalidConfiguration,
+ ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration,
+ ConnectionErrorCodeTypeXksProxyTimedOut,
+ ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration,
}
}
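The XKS_* connection error codes above surface in DescribeCustomKeyStores output when an external key store fails to connect. A sketch that inspects one store and reports a few of the XKS-specific failure reasons; the key store ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"), // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, store := range out.CustomKeyStores {
		if aws.StringValue(store.ConnectionState) != kms.ConnectionStateTypeFailed {
			continue
		}
		switch aws.StringValue(store.ConnectionErrorCode) {
		case kms.ConnectionErrorCodeTypeXksProxyAccessDenied:
			fmt.Println("proxy denied access to the KMS authentication credential")
		case kms.ConnectionErrorCodeTypeXksProxyNotReachable:
			fmt.Println("proxy endpoint could not be reached")
		case kms.ConnectionErrorCodeTypeXksProxyTimedOut:
			fmt.Println("proxy did not respond in time")
		default:
			fmt.Println("connection failed:", aws.StringValue(store.ConnectionErrorCode))
		}
	}
}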
@@ -18779,6 +21054,22 @@ func ConnectionStateType_Values() []string {
}
}
+const (
+ // CustomKeyStoreTypeAwsCloudhsm is a CustomKeyStoreType enum value
+ CustomKeyStoreTypeAwsCloudhsm = "AWS_CLOUDHSM"
+
+ // CustomKeyStoreTypeExternalKeyStore is a CustomKeyStoreType enum value
+ CustomKeyStoreTypeExternalKeyStore = "EXTERNAL_KEY_STORE"
+)
+
+// CustomKeyStoreType_Values returns all elements of the CustomKeyStoreType enum
+func CustomKeyStoreType_Values() []string {
+ return []string{
+ CustomKeyStoreTypeAwsCloudhsm,
+ CustomKeyStoreTypeExternalKeyStore,
+ }
+}
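EXTERNAL_KEY_STORE selects the new store type at creation time. A sketch, assuming CreateCustomKeyStoreInput gained matching CustomKeyStoreType and XksProxy* setters in this SDK version (they are added elsewhere in this diff); every identifier, endpoint, and credential value is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Placeholder credential values: 22-character access key ID, 43-character raw secret.
	cred := (&kms.XksProxyAuthenticationCredentialType{}).
		SetAccessKeyId("ABCDE12345ABCDE12345AB").
		SetRawSecretAccessKey("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN012")

	out, err := svc.CreateCustomKeyStore((&kms.CreateCustomKeyStoreInput{}).
		SetCustomKeyStoreName("example-external-key-store").
		SetCustomKeyStoreType(kms.CustomKeyStoreTypeExternalKeyStore).
		SetXksProxyConnectivity(kms.XksProxyConnectivityTypePublicEndpoint).
		SetXksProxyUriEndpoint("https://xks.example.com").
		SetXksProxyUriPath("/example/kms/xks/v1").
		SetXksProxyAuthenticationCredential(cred))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created custom key store:", aws.StringValue(out.CustomKeyStoreId))
}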
+
const (
// CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value
CustomerMasterKeySpecRsa2048 = "RSA_2048"
@@ -19208,6 +21499,9 @@ const (
// OriginTypeAwsCloudhsm is a OriginType enum value
OriginTypeAwsCloudhsm = "AWS_CLOUDHSM"
+
+ // OriginTypeExternalKeyStore is a OriginType enum value
+ OriginTypeExternalKeyStore = "EXTERNAL_KEY_STORE"
)
// OriginType_Values returns all elements of the OriginType enum
@@ -19216,6 +21510,7 @@ func OriginType_Values() []string {
OriginTypeAwsKms,
OriginTypeExternal,
OriginTypeAwsCloudhsm,
+ OriginTypeExternalKeyStore,
}
}
@@ -19278,3 +21573,19 @@ func WrappingKeySpec_Values() []string {
WrappingKeySpecRsa2048,
}
}
+
+const (
+ // XksProxyConnectivityTypePublicEndpoint is a XksProxyConnectivityType enum value
+ XksProxyConnectivityTypePublicEndpoint = "PUBLIC_ENDPOINT"
+
+ // XksProxyConnectivityTypeVpcEndpointService is a XksProxyConnectivityType enum value
+ XksProxyConnectivityTypeVpcEndpointService = "VPC_ENDPOINT_SERVICE"
+)
+
+// XksProxyConnectivityType_Values returns all elements of the XksProxyConnectivityType enum
+func XksProxyConnectivityType_Values() []string {
+ return []string{
+ XksProxyConnectivityTypePublicEndpoint,
+ XksProxyConnectivityTypeVpcEndpointService,
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
index d926e08e6856..babb91fc8397 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
@@ -8,7 +8,7 @@
// For general information about KMS, see the Key Management Service Developer
// Guide (https://docs.aws.amazon.com/kms/latest/developerguide/).
//
-// KMS is replacing the term customer master key (CMK) with KMS key and KMS
+// KMS has replaced the term customer master key (CMK) with KMS key and KMS
// key. The concept has not changed. To prevent breaking changes, KMS is keeping
// some variations of this term.
//
@@ -38,14 +38,14 @@
//
// # Signing Requests
//
-// Requests must be signed by using an access key ID and a secret access key.
-// We strongly recommend that you do not use your Amazon Web Services account
-// (root) access key ID and secret key for everyday work with KMS. Instead,
-// use the access key ID and secret access key for an IAM user. You can also
-// use the Amazon Web Services Security Token Service to generate temporary
-// security credentials that you can use to sign requests.
+// Requests must be signed using an access key ID and a secret access key. We
+// strongly recommend that you do not use your Amazon Web Services account root
+// access key ID and secret access key for everyday work. You can use the access
+// key ID and secret access key for an IAM user or you can use the Security
+// Token Service (STS) to generate temporary security credentials and use those
+// to sign requests.
//
-// All KMS operations require Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+// All KMS requests must be signed with Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
//
// # Logging API Requests
//
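The doc.go text above describes Signature Version 4 signing with IAM or STS credentials. A short sketch of the usual client construction: the session resolves credentials from the default provider chain (environment variables, shared config, or STS-issued temporary credentials) and the SDK signs each request with SigV4 automatically; the region is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Credentials come from the default provider chain; every call is SigV4-signed.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := kms.New(sess)

	out, err := svc.ListKeys(&kms.ListKeysInput{Limit: aws.Int64(5)})
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range out.Keys {
		fmt.Println(aws.StringValue(k.KeyId))
	}
}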
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
index 4f8fc2104902..c897f638993a 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
@@ -19,12 +19,13 @@ const (
// "CloudHsmClusterInUseException".
//
// The request was rejected because the specified CloudHSM cluster is already
- // associated with a custom key store or it shares a backup history with a cluster
- // that is associated with a custom key store. Each custom key store must be
- // associated with a different CloudHSM cluster.
+ // associated with an CloudHSM key store in the account, or it shares a backup
+ // history with an CloudHSM key store in the account. Each CloudHSM key store
+ // in the account must be associated with a different CloudHSM cluster.
//
- // Clusters that share a backup history have the same cluster certificate. To
- // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // CloudHSM clusters that share a backup history have the same cluster certificate.
+ // To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
ErrCodeCloudHsmClusterInUseException = "CloudHsmClusterInUseException"
@@ -32,29 +33,29 @@ const (
// "CloudHsmClusterInvalidConfigurationException".
//
// The request was rejected because the associated CloudHSM cluster did not
- // meet the configuration requirements for a custom key store.
+ // meet the configuration requirements for an CloudHSM key store.
//
- // * The cluster must be configured with private subnets in at least two
- // different Availability Zones in the Region.
+ // * The CloudHSM cluster must be configured with private subnets in at least
+ // two different Availability Zones in the Region.
//
// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
- // group ID. These rules are set by default when you create the cluster.
- // Do not delete or change them. To get information about a particular security
- // group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+ // group ID. These rules are set by default when you create the CloudHSM
+ // cluster. Do not delete or change them. To get information about a particular
+ // security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
- // * The cluster must contain at least as many HSMs as the operation requires.
- // To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+ // * The CloudHSM cluster must contain at least as many HSMs as the operation
+ // requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
- // with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+ // with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -65,10 +66,9 @@ const (
// ErrCodeCloudHsmClusterNotActiveException for service response error code
// "CloudHsmClusterNotActiveException".
//
- // The request was rejected because the CloudHSM cluster that is associated
- // with the custom key store is not active. Initialize and activate the cluster
- // and try the command again. For detailed instructions, see Getting Started
- // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+ // The request was rejected because the CloudHSM cluster associated with the
+ // CloudHSM key store is not active. Initialize and activate the cluster and
+ // try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
ErrCodeCloudHsmClusterNotActiveException = "CloudHsmClusterNotActiveException"
@@ -84,15 +84,16 @@ const (
//
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
- // to specify an unrelated cluster.
+ // to specify an unrelated cluster for an CloudHSM key store.
//
- // Specify a cluster that shares a backup history with the original cluster.
- // This includes clusters that were created from a backup of the current cluster,
- // and clusters that were created from the same backup that produced the current
- // cluster.
+ // Specify an CloudHSM cluster that shares a backup history with the original
+ // cluster. This includes clusters that were created from a backup of the current
+ // cluster, and clusters that were created from the same backup that produced
+ // the current cluster.
//
- // Clusters that share a backup history have the same cluster certificate. To
- // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // CloudHSM clusters that share a backup history have the same cluster certificate.
+ // To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
ErrCodeCloudHsmClusterNotRelatedException = "CloudHsmClusterNotRelatedException"
@@ -114,17 +115,27 @@ const (
//
// This exception is thrown under the following conditions:
//
- // * You requested the CreateKey or GenerateRandom operation in a custom
- // key store that is not connected. These operations are valid only when
- // the custom key store ConnectionState is CONNECTED.
+ // * You requested the ConnectCustomKeyStore operation on a custom key store
+ // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+ // for all other ConnectionState values. To reconnect a custom key store
+ // in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+ // it (ConnectCustomKeyStore).
+ //
+ // * You requested the CreateKey operation in a custom key store that is
+ // not connected. This operation is valid only when the custom key store
+ // ConnectionState is CONNECTED.
+ //
+ // * You requested the DisconnectCustomKeyStore operation on a custom key
+ // store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+ // is valid for all other ConnectionState values.
//
// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
- // * You requested the ConnectCustomKeyStore operation on a custom key store
- // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
- // for all other ConnectionState values.
+ // * You requested the GenerateRandom operation in an CloudHSM key store
+ // that is not connected. This operation is valid only when the CloudHSM
+ // key store ConnectionState is CONNECTED.
ErrCodeCustomKeyStoreInvalidStateException = "CustomKeyStoreInvalidStateException"
// ErrCodeCustomKeyStoreNameInUseException for service response error code
@@ -145,8 +156,8 @@ const (
// ErrCodeDependencyTimeoutException for service response error code
// "DependencyTimeoutException".
//
- // The system timed out while trying to fulfill the request. The request can
- // be retried.
+ // The system timed out while trying to fulfill the request. You can retry the
+ // request.
ErrCodeDependencyTimeoutException = "DependencyTimeoutException"
// ErrCodeDisabledException for service response error code
@@ -183,9 +194,10 @@ const (
// "IncorrectTrustAnchorException".
//
// The request was rejected because the trust anchor certificate in the request
- // is not the trust anchor certificate for the specified CloudHSM cluster.
+ // to create an CloudHSM key store is not the trust anchor certificate for the
+ // specified CloudHSM cluster.
//
- // When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+ // When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
ErrCodeIncorrectTrustAnchorException = "IncorrectTrustAnchorException"
@@ -274,9 +286,17 @@ const (
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
- // For more information about how key state affects the use of a KMS key, see
- // Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
- // in the Key Management Service Developer Guide .
+ // This exception means one of the following:
+ //
+ // * The key state of the KMS key is not compatible with the operation. To
+ // find the key state, use the DescribeKey operation. For more information
+ // about which key states are compatible with each KMS operation, see Key
+ // states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+ // in the Key Management Service Developer Guide .
+ //
+ // * For cryptographic operations on KMS keys in custom key stores, this
+ // exception represents a general failure with many possible causes. To identify
+ // the cause, see the error message that accompanies the exception.
ErrCodeInvalidStateException = "KMSInvalidStateException"
// ErrCodeKMSInvalidMacException for service response error code
@@ -336,41 +356,170 @@ const (
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
ErrCodeUnsupportedOperationException = "UnsupportedOperationException"
+
+ // ErrCodeXksKeyAlreadyInUseException for service response error code
+ // "XksKeyAlreadyInUseException".
+ //
+ // The request was rejected because the external key specified by the XksKeyId
+ // parameter is already associated with a KMS key in this external key store.
+ // Each KMS key in an external key store must be associated with a different
+ // external key.
+ ErrCodeXksKeyAlreadyInUseException = "XksKeyAlreadyInUseException"
+
+ // ErrCodeXksKeyInvalidConfigurationException for service response error code
+ // "XksKeyInvalidConfigurationException".
+ //
+ // The request was rejected because the external key specified by the XksKeyId
+ // parameter did not meet the configuration requirements for an external key
+ // store.
+ //
+ // The external key must be an AES-256 symmetric key that is enabled and performs
+ // encryption and decryption.
+ ErrCodeXksKeyInvalidConfigurationException = "XksKeyInvalidConfigurationException"
+
+ // ErrCodeXksKeyNotFoundException for service response error code
+ // "XksKeyNotFoundException".
+ //
+ // The request was rejected because the external key store proxy could not find
+ // the external key. This exception is thrown when the value of the XksKeyId
+ // parameter doesn't identify a key in the external key manager associated with
+ // the external key store proxy.
+ //
+ // Verify that the XksKeyId represents an existing key in the external key manager.
+ // Use the key identifier that the external key store proxy uses to identify
+ // the key. For details, see the documentation provided with your external key
+ // store proxy or key manager.
+ ErrCodeXksKeyNotFoundException = "XksKeyNotFoundException"
+
+ // ErrCodeXksProxyIncorrectAuthenticationCredentialException for service response error code
+ // "XksProxyIncorrectAuthenticationCredentialException".
+ //
+ // The request was rejected because the proxy credentials failed to authenticate
+ // to the specified external key store proxy. The specified external key store
+ // proxy rejected a status request from KMS due to invalid credentials. This
+ // can indicate an error in the credentials or in the identification of the
+ // external key store proxy.
+ ErrCodeXksProxyIncorrectAuthenticationCredentialException = "XksProxyIncorrectAuthenticationCredentialException"
+
+ // ErrCodeXksProxyInvalidConfigurationException for service response error code
+ // "XksProxyInvalidConfigurationException".
+ //
+ // The request was rejected because the Amazon VPC endpoint service configuration
+ // does not fulfill the requirements for an external key store proxy. For details,
+ // see the exception message.
+ ErrCodeXksProxyInvalidConfigurationException = "XksProxyInvalidConfigurationException"
+
+ // ErrCodeXksProxyInvalidResponseException for service response error code
+ // "XksProxyInvalidResponseException".
+ //
+ // KMS cannot interpret the response it received from the external key store
+ // proxy. The problem might be a poorly constructed response, but it could also
+ // be a transient network issue. If you see this error repeatedly, report it
+ // to the proxy vendor.
+ ErrCodeXksProxyInvalidResponseException = "XksProxyInvalidResponseException"
+
+ // ErrCodeXksProxyUriEndpointInUseException for service response error code
+ // "XksProxyUriEndpointInUseException".
+ //
+ // The request was rejected because the XksProxyUriEndpoint is already
+ // associated with an external key store in the Amazon Web Services account
+ // and Region. Each external key store in an account and Region must use a
+ // unique external key store proxy address.
+ ErrCodeXksProxyUriEndpointInUseException = "XksProxyUriEndpointInUseException"
+
+ // ErrCodeXksProxyUriInUseException for service response error code
+ // "XksProxyUriInUseException".
+ //
+ // The request was rejected because the concatenation of the XksProxyUriEndpoint
+ // and XksProxyUriPath is already associated with an external key store in the
+ // Amazon Web Services account and Region. Each external key store in an account
+ // and Region must use a unique external key store proxy API address.
+ ErrCodeXksProxyUriInUseException = "XksProxyUriInUseException"
+
+ // ErrCodeXksProxyUriUnreachableException for service response error code
+ // "XksProxyUriUnreachableException".
+ //
+ // KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+ // before you create the external key store or update its settings.
+ //
+ // This exception is also thrown when the external key store proxy response
+ // to a GetHealthStatus request indicates that all external key manager instances
+ // are unavailable.
+ ErrCodeXksProxyUriUnreachableException = "XksProxyUriUnreachableException"
+
+ // ErrCodeXksProxyVpcEndpointServiceInUseException for service response error code
+ // "XksProxyVpcEndpointServiceInUseException".
+ //
+ // The request was rejected because the specified Amazon VPC endpoint service
+ // is already associated with an external key store in the Amazon Web Services
+ // account and Region. Each external key store in an Amazon Web Services account
+ // and Region must use a different Amazon VPC endpoint service.
+ ErrCodeXksProxyVpcEndpointServiceInUseException = "XksProxyVpcEndpointServiceInUseException"
+
+ // ErrCodeXksProxyVpcEndpointServiceInvalidConfigurationException for service response error code
+ // "XksProxyVpcEndpointServiceInvalidConfigurationException".
+ //
+ // The request was rejected because the Amazon VPC endpoint service configuration
+ // does not fulfill the requirements for an external key store proxy. For details,
+ // see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+ // for Amazon VPC endpoint service connectivity for an external key store.
+ ErrCodeXksProxyVpcEndpointServiceInvalidConfigurationException = "XksProxyVpcEndpointServiceInvalidConfigurationException"
+
+ // ErrCodeXksProxyVpcEndpointServiceNotFoundException for service response error code
+ // "XksProxyVpcEndpointServiceNotFoundException".
+ //
+ // The request was rejected because KMS could not find the specified VPC endpoint
+ // service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+ // for the external key store. Also, confirm that the Allow principals list
+ // for the VPC endpoint service includes the KMS service principal for the Region,
+ // such as cks.kms.us-east-1.amazonaws.com.
+ ErrCodeXksProxyVpcEndpointServiceNotFoundException = "XksProxyVpcEndpointServiceNotFoundException"
)
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "AlreadyExistsException": newErrorAlreadyExistsException,
- "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException,
- "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException,
- "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException,
- "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException,
- "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException,
- "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException,
- "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException,
- "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException,
- "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException,
- "DependencyTimeoutException": newErrorDependencyTimeoutException,
- "DisabledException": newErrorDisabledException,
- "ExpiredImportTokenException": newErrorExpiredImportTokenException,
- "IncorrectKeyException": newErrorIncorrectKeyException,
- "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException,
- "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException,
- "KMSInternalException": newErrorInternalException,
- "InvalidAliasNameException": newErrorInvalidAliasNameException,
- "InvalidArnException": newErrorInvalidArnException,
- "InvalidCiphertextException": newErrorInvalidCiphertextException,
- "InvalidGrantIdException": newErrorInvalidGrantIdException,
- "InvalidGrantTokenException": newErrorInvalidGrantTokenException,
- "InvalidImportTokenException": newErrorInvalidImportTokenException,
- "InvalidKeyUsageException": newErrorInvalidKeyUsageException,
- "InvalidMarkerException": newErrorInvalidMarkerException,
- "KMSInvalidStateException": newErrorInvalidStateException,
- "KMSInvalidMacException": newErrorKMSInvalidMacException,
- "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException,
- "KeyUnavailableException": newErrorKeyUnavailableException,
- "LimitExceededException": newErrorLimitExceededException,
- "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException,
- "NotFoundException": newErrorNotFoundException,
- "TagException": newErrorTagException,
- "UnsupportedOperationException": newErrorUnsupportedOperationException,
+ "AlreadyExistsException": newErrorAlreadyExistsException,
+ "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException,
+ "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException,
+ "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException,
+ "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException,
+ "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException,
+ "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException,
+ "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException,
+ "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException,
+ "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException,
+ "DependencyTimeoutException": newErrorDependencyTimeoutException,
+ "DisabledException": newErrorDisabledException,
+ "ExpiredImportTokenException": newErrorExpiredImportTokenException,
+ "IncorrectKeyException": newErrorIncorrectKeyException,
+ "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException,
+ "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException,
+ "KMSInternalException": newErrorInternalException,
+ "InvalidAliasNameException": newErrorInvalidAliasNameException,
+ "InvalidArnException": newErrorInvalidArnException,
+ "InvalidCiphertextException": newErrorInvalidCiphertextException,
+ "InvalidGrantIdException": newErrorInvalidGrantIdException,
+ "InvalidGrantTokenException": newErrorInvalidGrantTokenException,
+ "InvalidImportTokenException": newErrorInvalidImportTokenException,
+ "InvalidKeyUsageException": newErrorInvalidKeyUsageException,
+ "InvalidMarkerException": newErrorInvalidMarkerException,
+ "KMSInvalidStateException": newErrorInvalidStateException,
+ "KMSInvalidMacException": newErrorKMSInvalidMacException,
+ "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException,
+ "KeyUnavailableException": newErrorKeyUnavailableException,
+ "LimitExceededException": newErrorLimitExceededException,
+ "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException,
+ "NotFoundException": newErrorNotFoundException,
+ "TagException": newErrorTagException,
+ "UnsupportedOperationException": newErrorUnsupportedOperationException,
+ "XksKeyAlreadyInUseException": newErrorXksKeyAlreadyInUseException,
+ "XksKeyInvalidConfigurationException": newErrorXksKeyInvalidConfigurationException,
+ "XksKeyNotFoundException": newErrorXksKeyNotFoundException,
+ "XksProxyIncorrectAuthenticationCredentialException": newErrorXksProxyIncorrectAuthenticationCredentialException,
+ "XksProxyInvalidConfigurationException": newErrorXksProxyInvalidConfigurationException,
+ "XksProxyInvalidResponseException": newErrorXksProxyInvalidResponseException,
+ "XksProxyUriEndpointInUseException": newErrorXksProxyUriEndpointInUseException,
+ "XksProxyUriInUseException": newErrorXksProxyUriInUseException,
+ "XksProxyUriUnreachableException": newErrorXksProxyUriUnreachableException,
+ "XksProxyVpcEndpointServiceInUseException": newErrorXksProxyVpcEndpointServiceInUseException,
+ "XksProxyVpcEndpointServiceInvalidConfigurationException": newErrorXksProxyVpcEndpointServiceInvalidConfigurationException,
+ "XksProxyVpcEndpointServiceNotFoundException": newErrorXksProxyVpcEndpointServiceNotFoundException,
}
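
A minimal sketch of how these new external key store (XKS) error codes surface to callers of the KMS client. The custom key store ID and region are placeholders, and the import paths assume the upstream github.com/aws/aws-sdk-go module rather than the vendored fork used by this repository.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := kms.New(sess)

	// "cks-1234567890abcdef0" is a placeholder custom key store ID.
	_, err := client.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case kms.ErrCodeCustomKeyStoreInvalidStateException:
				log.Fatal("key store is DISCONNECTING or FAILED; disconnect it, then reconnect")
			case kms.ErrCodeXksProxyUriUnreachableException:
				log.Fatal("external key store proxy is unreachable")
			}
		}
		log.Fatal(err)
	}
	fmt.Println("connection initiated")
}
```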
diff --git a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index c0706bec93dd..63729d0a78b4 100644
--- a/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/cluster-autoscaler/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// AssumeRole API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources that you might not normally have access to.
-// These temporary credentials consist of an access key ID, a secret access
-// key, and a security token. Typically, you use AssumeRole within your account
-// or for cross-account access. For a comparison of AssumeRole with other API
-// operations that produce temporary credentials, see Requesting Temporary Security
-// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// Amazon Web Services resources. These temporary credentials consist of an
+// access key ID, a secret access key, and a security token. Typically, you
+// use AssumeRole within your account or for cross-account access. For a comparison
+// of AssumeRole with other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
@@ -1103,13 +1102,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// # Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service except the following:
+// Amazon Web Services service with the following exceptions:
//
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
-// API.
+// API. This limitation does not apply to console sessions.
//
// - You cannot call any STS operations except GetCallerIdentity.
//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policy Amazon
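
The AssumeRole documentation updated above describes the temporary credentials the call returns; a minimal sketch of invoking it with the upstream aws-sdk-go client follows. The role ARN and session name are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	client := sts.New(sess)

	// The temporary credentials consist of an access key ID, a secret
	// access key, and a session token.
	out, err := client.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("cluster-autoscaler"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```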
diff --git a/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/.travis.yml
deleted file mode 100644
index c79105c2fbeb..000000000000
--- a/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
- - 1.13
- - 1.x
- - tip
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/retry.go b/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/retry.go
index 1ce2507ebc8b..b9c0c51cd755 100644
--- a/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/retry.go
+++ b/cluster-autoscaler/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -5,10 +5,20 @@ import (
"time"
)
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
@@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error {
return RetryNotify(o, b, nil)
}
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
return RetryNotifyWithTimer(operation, b, notify, nil)
}
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
- var err error
- var next time.Duration
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
if t == nil {
t = &defaultTimer{}
}
@@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer
b.Reset()
for {
- if err = operation(); err == nil {
- return nil
+ res, err = operation()
+ if err == nil {
+ return res, nil
}
var permanent *PermanentError
if errors.As(err, &permanent) {
- return permanent.Err
+ return res, permanent.Err
}
if next = b.NextBackOff(); next == Stop {
if cerr := ctx.Err(); cerr != nil {
- return cerr
+ return res, cerr
}
- return err
+ return res, err
}
if notify != nil {
@@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer
select {
case <-ctx.Done():
- return ctx.Err()
+ return res, ctx.Err()
case <-t.C():
}
}
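
The generic RetryWithData / RetryNotifyWithData helpers added above return the operation's result alongside the final error. A minimal usage sketch; the URL is a placeholder.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Retry the operation with exponential backoff and get its result back
	// once it succeeds (or the final error once the policy gives up).
	resp, err := backoff.RetryWithData(func() (*http.Response, error) {
		return http.Get("https://example.com/healthz") // placeholder endpoint
	}, backoff.NewExponentialBackOff())
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```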
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/README.md b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/README.md
index 792b4a60b346..8bf0e5b78153 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/README.md
@@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
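
For reference, a minimal sketch of the package API listed in the README above, covering both one-shot and streaming hashing:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming hashing via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```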
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/testall.sh b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 000000000000..94b9c443987c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash.go b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash.go
index 15c835d5417c..a9e0d45c9dcc 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
index be8db5bf7960..3e8b132579ec 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -1,215 +1,209 @@
+//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
// Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
- SUBQ $32, BX
+ SUBQ $32, end
// Check whether we have at least one block.
- CMPQ DX, $32
+ CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
JMP afterBlocks
noBlocks:
- MOVQ ·prime5v(SB), AX
+ MOVQ ·primes+32(SB), h
afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
JGE finalize
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
+ CMPQ p, end
+ JL loop1
finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
RET
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
// Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
// Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
+ blockLoop()
// Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
RET
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 000000000000..7e3145a22186
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
similarity index 73%
rename from cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
rename to cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index ad14b807f4d9..9216e0a40c1a 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 4a5a821603e5..26df13bba4b7 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index fc9bea7a31f2..e86f1b5fd8e4 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
diff --git a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 376e0ca2e497..1c1638fd88a1 100644
--- a/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/cluster-autoscaler/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go b/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go
index 76cf4852c769..eb9fb7ff2d8c 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -85,7 +85,7 @@ func (v *Version) Set(version string) error {
return fmt.Errorf("failed to validate metadata: %v", err)
}
- parsed := make([]int64, 3, 3)
+ parsed := make([]int64, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go
index 7a0e0d3a51b1..25d9c1aa9387 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go
@@ -30,8 +30,8 @@ import (
// It returns one of the following:
// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
// (0, err) - an error happened (e.g. error converting time).
-// (time, nil) - watchdog is enabled and we can send ping.
-// time is delay before inactive service will be killed.
+// (time, nil) - watchdog is enabled and we can send ping. time is delay
+// before inactive service will be killed.
func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
wusec := os.Getenv("WATCHDOG_USEC")
wpid := os.Getenv("WATCHDOG_PID")
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
index cff5af1a64c3..147f756fe24e 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -176,6 +176,11 @@ func (c *Conn) Close() {
c.sigconn.Close()
}
+// Connected returns whether conn is connected
+func (c *Conn) Connected() bool {
+ return c.sysconn.Connected() && c.sigconn.Connected()
+}
+
// NewConnection establishes a connection to a bus using a caller-supplied function.
// This allows connecting to remote buses through a user-supplied mechanism.
// The supplied function may be called multiple times, and should return independent connections.
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
index fa04afc708e7..074148cb4d66 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
@@ -417,6 +417,29 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
return status, nil
}
+// GetUnitByPID returns the unit object path of the unit a process ID
+// belongs to. It takes a UNIX PID and returns the object path. The PID must
+// refer to an existing system process.
+func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) {
+ var result dbus.ObjectPath
+
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result)
+
+ return result, err
+}
+
+// GetUnitNameByPID returns the name of the unit a process ID belongs to. It
+// takes a UNIX PID and returns the unit name. The PID must refer to an
+// existing system process.
+func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) {
+ path, err := c.GetUnitByPID(ctx, pid)
+ if err != nil {
+ return "", err
+ }
+
+ return unitName(path), nil
+}
+
// Deprecated: use ListUnitsContext instead.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.ListUnitsContext(context.Background())
@@ -828,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
return status, nil
}
+
+// Freeze the cgroup associated with the unit.
+// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2.
+func (c *Conn) FreezeUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store()
+}
+
+// Unfreeze the cgroup associated with the unit.
+func (c *Conn) ThawUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store()
+}
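
A minimal sketch of the helpers added above (GetUnitNameByPID, FreezeUnit, ThawUnit), assuming a system bus connection via NewSystemConnectionContext and a host running systemd with cgroup v2:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	ctx := context.Background()

	conn, err := dbus.NewSystemConnectionContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Resolve the unit that owns the current process.
	unit, err := conn.GetUnitNameByPID(ctx, uint32(os.Getpid()))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running in unit:", unit)

	// Freeze and thaw the unit's cgroup (cgroup v2 only).
	if err := conn.FreezeUnit(ctx, unit); err != nil {
		log.Fatal(err)
	}
	if err := conn.ThawUnit(ctx, unit); err != nil {
		log.Fatal(err)
	}
}
```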
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
index 8d58ca0fbca0..c5b23a81968f 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
// Package journal provides write bindings to the local systemd journal.
@@ -53,15 +54,9 @@ var (
onceConn sync.Once
)
-func init() {
- onceConn.Do(initConn)
-}
-
// Enabled checks whether the local systemd journal is available for logging.
func Enabled() bool {
- onceConn.Do(initConn)
-
- if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
+ if c := getOrInitConn(); c == nil {
return false
}
@@ -74,6 +69,58 @@ func Enabled() bool {
return true
}
+// StderrIsJournalStream returns whether the process stderr is connected
+// to the Journal's stream transport.
+//
+// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stderr's device and inode numbers match it.
+//
+// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
+// is present, but malformed, fstat syscall fails, etc.
+//
+// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
+func StderrIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stderr)
+}
+
+// StdoutIsJournalStream returns whether the process stdout is connected
+// to the Journal's stream transport.
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stdout's device and inode numbers match it.
+//
+// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
+// is present, but malformed, fstat syscall fails, etc.
+//
+// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stdout)
+}
+
+func fdIsJournalStream(fd int) (bool, error) {
+ journalStream := os.Getenv("JOURNAL_STREAM")
+ if journalStream == "" {
+ return false, nil
+ }
+
+ var expectedStat syscall.Stat_t
+ _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
+ }
+
+ var stat syscall.Stat_t
+ err = syscall.Fstat(fd, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
+ return match, nil
+}
+
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
@@ -82,7 +129,7 @@ func Enabled() bool {
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
- conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+ conn := getOrInitConn()
if conn == nil {
return errors.New("could not initialize socket to journald")
}
@@ -126,6 +173,16 @@ func Send(message string, priority Priority, vars map[string]string) error {
return nil
}
+// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
+func getOrInitConn() *net.UnixConn {
+ conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+ if conn != nil {
+ return conn
+ }
+ onceConn.Do(initConn)
+ return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+}
+
func appendVariable(w io.Writer, name, value string) {
if err := validVarName(name); err != nil {
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
@@ -194,7 +251,7 @@ func tempFd() (*os.File, error) {
}
// initConn initializes the global `unixConnPtr` socket.
-// It is meant to be called exactly once, at program startup.
+// It is automatically called when needed.
func initConn() {
autobind, err := net.ResolveUnixAddr("unixgram", "")
if err != nil {
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
index 677aca68ed20..322e41e74c3e 100644
--- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
+++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -33,3 +33,11 @@ func Enabled() bool {
func Send(message string, priority Priority, vars map[string]string) error {
return errors.New("could not initialize socket to journald")
}
+
+func StderrIsJournalStream() (bool, error) {
+ return false, nil
+}
+
+func StdoutIsJournalStream() (bool, error) {
+ return false, nil
+}
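
The StderrIsJournalStream helper added above enables the automatic protocol upgrading described in the Journal Native Protocol document; a minimal sketch of using it to choose between plain stderr logging and journal.Send:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	native, err := journal.StderrIsJournalStream()
	if err != nil {
		log.Fatal(err)
	}
	if native {
		// stderr is already connected to the journal's stream transport,
		// so upgrade to the native protocol to keep structured fields.
		journal.Send("switching to native journal protocol", journal.PriInfo, nil)
		return
	}
	fmt.Fprintln(os.Stderr, "falling back to plain stderr logging")
}
```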
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 74a378157a6f..352018e70370 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,10 +1,26 @@
# Change history of go-restful
-## [v3.9.0] - 20221-07-21
+## [v3.10.2] - 2023-03-09
+
+- introduced MergePathStrategy to be able to revert the path concatenation behaviour to that of 3.9.0;
+ see the comment in the README on how to customize this behaviour.
+
+## [v3.10.1] - 2022-11-19
+
+- fix broken 3.10.0 by using path package for joining paths
+
+## [v3.10.0] - 2022-10-11 - BROKEN
+
+- changed tokenizer to match std route match behavior; do not trimright the path (#511)
+- Add MIME_ZIP (#512)
+- Add MIME_ZIP and HEADER_ContentDisposition (#513)
+- Changed how to get query parameter issue #510
+
+## [v3.9.0] - 2022-07-21
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
-## [v3.8.0] - 20221-06-06
+## [v3.8.0] - 2022-06-06
- use exact matching of allowed domain entries, issue #489 (#493)
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/README.md b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/README.md
index 0625359dc409..85da90128e4e 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package.
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
+- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path
+ - versions >= 3.10.1 set the value to `PathJoinStrategy`, which fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services to stop working correctly.
+ - versions <= 3.9 used the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`.
+ - you can set the value to a custom implementation (must implement `MergePathStrategyFunc`)
## Resources
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/constants.go b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/constants.go
index 203439c5e5fd..2328bde6c7a8 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/constants.go
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/constants.go
@@ -7,12 +7,14 @@ package restful
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
+ HEADER_ContentDisposition = "Content-Disposition"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/request.go b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/request.go
index 5725a0759581..0020095e8622 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/request.go
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/request.go
@@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
-// restful.DefaultRequestContentType(restful.MIME_JSON)
+//
+// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
@@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
- return r.Request.FormValue(name)
+ return r.Request.URL.Query().Get(name)
}
// QueryParameters returns the all the query parameters values by name
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/response.go b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/response.go
index 8f0b56aa2dab..a41a92cc2c35 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/response.go
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/response.go
@@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
+ if DefaultResponseMimeType == MIME_ZIP {
+ return entityAccessRegistry.accessorAt(MIME_ZIP)
+ }
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route.go b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route.go
index 193f4a6b014c..ea05b3da887b 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -164,7 +164,7 @@ func tokenizePath(path string) []string {
if "/" == path {
return nil
}
- return strings.Split(strings.Trim(path, "/"), "/")
+ return strings.Split(strings.TrimLeft(path, "/"), "/")
}
// for debugging
@@ -176,3 +176,5 @@ func (r *Route) String() string {
func (r *Route) EnableContentEncoding(enabled bool) {
r.contentEncodingEnabled = &enabled
}
+
+var TrimRightSlashEnabled = false
diff --git a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route_builder.go
index 23641b6dd54a..827f471de000 100644
--- a/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route_builder.go
+++ b/cluster-autoscaler/vendor/github.com/emicklei/go-restful/v3/route_builder.go
@@ -7,6 +7,7 @@ package restful
import (
"fmt"
"os"
+ "path"
"reflect"
"runtime"
"strings"
@@ -46,11 +47,12 @@ type RouteBuilder struct {
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
-// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
-// func Returns500(b *RouteBuilder) {
-// b.Returns(500, "Internal Server Error", restful.ServiceError{})
-// }
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
for _, each := range oneArgBlocks {
each(b)
@@ -351,8 +353,28 @@ func (b *RouteBuilder) Build() Route {
return route
}
-func concatPath(path1, path2 string) string {
- return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+type MergePathStrategyFunc func(rootPath, routePath string) string
+
+var (
+ // behavior >= 3.10
+ PathJoinStrategy = func(rootPath, routePath string) string {
+ return path.Join(rootPath, routePath)
+ }
+
+ // behavior <= 3.9
+ TrimSlashStrategy = func(rootPath, routePath string) string {
+ return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
+ }
+
+ // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices.
+ // The value is set to PathJoinStrategy
+ // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227]
+ MergePathStrategy = PathJoinStrategy
+)
+
+// merge two paths using the current (package global) merge path strategy.
+func concatPath(rootPath, routePath string) string {
+ return MergePathStrategy(rootPath, routePath)
}
var anonymousFuncCount int32
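concatPath is now delegated to a configurable MergePathStrategy, defaulting to path.Join. A standalone sketch (not from the patch) of the practical difference: path.Join cleans the merged path, collapsing duplicate slashes and resolving ".." segments (the hardening referenced above), but it also drops a trailing slash, which is the visible behavior change versus the pre-3.10 concatenation.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Pre-3.10 strategy, as removed above.
	trimSlash := func(rootPath, routePath string) string {
		return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
	}

	fmt.Println(path.Join("/api/", "/users/../admin")) // /api/admin
	fmt.Println(trimSlash("/api/", "/users/../admin")) // /api/users/../admin

	fmt.Println(path.Join("/api", "users/")) // /api/users   (trailing slash dropped)
	fmt.Println(trimSlash("/api", "users/")) // /api/users/
}
```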
diff --git a/cluster-autoscaler/vendor/github.com/go-logr/logr/.golangci.yaml b/cluster-autoscaler/vendor/github.com/go-logr/logr/.golangci.yaml
index 94ff801df1ac..0cffafa7bf94 100644
--- a/cluster-autoscaler/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/cluster-autoscaler/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -6,7 +6,6 @@ linters:
disable-all: true
enable:
- asciicheck
- - deadcode
- errcheck
- forcetypeassert
- gocritic
@@ -18,10 +17,8 @@ linters:
- misspell
- revive
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
issues:
exclude-use-default: false
diff --git a/cluster-autoscaler/vendor/github.com/go-logr/logr/discard.go b/cluster-autoscaler/vendor/github.com/go-logr/logr/discard.go
index 9d92a38f1d75..99fe8be93c1b 100644
--- a/cluster-autoscaler/vendor/github.com/go-logr/logr/discard.go
+++ b/cluster-autoscaler/vendor/github.com/go-logr/logr/discard.go
@@ -20,35 +20,5 @@ package logr
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
- return Logger{
- level: 0,
- sink: discardLogSink{},
- }
-}
-
-// discardLogSink is a LogSink that discards all messages.
-type discardLogSink struct{}
-
-// Verify that it actually implements the interface
-var _ LogSink = discardLogSink{}
-
-func (l discardLogSink) Init(RuntimeInfo) {
-}
-
-func (l discardLogSink) Enabled(int) bool {
- return false
-}
-
-func (l discardLogSink) Info(int, string, ...interface{}) {
-}
-
-func (l discardLogSink) Error(error, string, ...interface{}) {
-}
-
-func (l discardLogSink) WithValues(...interface{}) LogSink {
- return l
-}
-
-func (l discardLogSink) WithName(string) LogSink {
- return l
+ return New(nil)
}
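Discard no longer needs its own LogSink: with the nil-sink guards added to logr.go further down in this patch, New(nil) yields a Logger whose methods are no-ops. A minimal sketch (not from the patch), assuming the module's usual import path github.com/go-logr/logr:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

func main() {
	log := logr.Discard()
	log.Info("this goes nowhere") // no-op: the sink is nil

	fmt.Println(log.Enabled())         // false
	fmt.Println(log == logr.Discard()) // true: discard loggers compare as equal
}
```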
diff --git a/cluster-autoscaler/vendor/github.com/go-logr/logr/funcr/funcr.go b/cluster-autoscaler/vendor/github.com/go-logr/logr/funcr/funcr.go
index 7accdb0c4003..e52f0cd01e2e 100644
--- a/cluster-autoscaler/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/cluster-autoscaler/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -21,13 +21,13 @@ limitations under the License.
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
-// Custom LogSinks
+// # Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
-// Formatting
+// # Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
@@ -37,6 +37,7 @@ package funcr
import (
"bytes"
"encoding"
+ "encoding/json"
"fmt"
"path/filepath"
"reflect"
@@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
prefix: "",
values: nil,
depth: 0,
- opts: opts,
+ opts: &opts,
}
return f
}
@@ -231,7 +232,7 @@ type Formatter struct {
values []interface{}
valuesStr string
depth int
- opts Options
+ opts *Options
}
// outputFormat indicates which outputFormat to use.
@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
@@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if omitempty && isEmpty(v.Field(i)) {
continue
}
- if i > 0 {
+ if printComma {
buf.WriteByte(',')
}
+ printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
@@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
return buf.String()
case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON make sure this isn't really a json.RawMessage.
+ // If so just emit "as-is" and don't pretty it as that will just print
+ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
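The funcr change above makes JSON output embed a json.RawMessage value verbatim rather than rendering it as a slice of byte values. A minimal sketch (not from the patch), assuming the usual import path github.com/go-logr/logr/funcr:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

	raw := json.RawMessage(`{"nested":"value"}`)
	log.Info("payload", "raw", raw)
	// Before this change, "raw" was rendered as a numeric byte array, e.g. [123,34,...].
	// With it, the output line contains ... "raw":{"nested":"value"} ...
}
```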
diff --git a/cluster-autoscaler/vendor/github.com/go-logr/logr/logr.go b/cluster-autoscaler/vendor/github.com/go-logr/logr/logr.go
index c3b56b3d2c5e..e027aea3fd38 100644
--- a/cluster-autoscaler/vendor/github.com/go-logr/logr/logr.go
+++ b/cluster-autoscaler/vendor/github.com/go-logr/logr/logr.go
@@ -21,7 +21,7 @@ limitations under the License.
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
-// Usage
+// # Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
@@ -30,16 +30,20 @@ limitations under the License.
// "structured logging".
//
// With Go's standard log package, we might write:
-// log.Printf("setting target value %s", targetValue)
+//
+// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
-// logger.Info("setting target", "value", targetValue)
+//
+// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
-// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
@@ -47,7 +51,7 @@ limitations under the License.
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
-// Verbosity
+// # Verbosity
//
// Often we want to log information only when the application in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
@@ -58,20 +62,22 @@ limitations under the License.
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
//
// We can write:
-// logger.V(2).Info("an unusual thing happened")
//
-// Logger Names
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
-// logger.WithName("compactor").Info("started", "time", time.Now())
+// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
@@ -82,25 +88,27 @@ limitations under the License.
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
-// Saved Values
+// # Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
//
-// Best Practices
+// # Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
@@ -124,15 +132,15 @@ limitations under the License.
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
-// Key Naming Conventions
+// # Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
-// * be human-readable and meaningful (not auto-generated or simple ordinals)
-// * be constant (not dependent on input data)
-// * contain only printable characters
-// * not contain whitespace or punctuation
-// * use lower case for simple keys and lowerCamelCase for more complex ones
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
@@ -141,51 +149,54 @@ limitations under the License.
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
-// * "caller": the calling information (file/line) of a particular log line
-// * "error": the underlying error value in the `Error` method
-// * "level": the log level
-// * "logger": the name of the associated logger
-// * "msg": the log message
-// * "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// * "ts": the timestamp for a log line
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
-// Break Glass
+// # Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying()
-// }
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of a way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
//
// Logger grants access to the sink to enable type assertions like this:
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink()(impl.Underlier) {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
@@ -201,11 +212,14 @@ import (
)
// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users.
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
- sink.Init(runtimeInfo)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
return logger
}
@@ -244,7 +258,7 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
- return l.sink.Enabled(l.level)
+ return l.sink != nil && l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
@@ -254,6 +268,9 @@ func (l Logger) Enabled() bool {
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
@@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
if level < 0 {
level = 0
}
@@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger {
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
@@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithName(name))
return l
}
@@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger {
// WithCallDepth(1) because it works with implementions that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
@@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
@@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
return helper, l
}
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
@@ -442,7 +482,7 @@ type LogSink interface {
WithName(name string) LogSink
}
-// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
@@ -467,7 +507,7 @@ type CallDepthLogSink interface {
WithCallDepth(depth int) LogSink
}
-// CallStackHelperLogSink represents a Logger that knows how to climb
+// CallStackHelperLogSink represents a LogSink that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
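Taken together, the nil-sink guards and the new IsZero method make the zero-value Logger safe to pass around and easy to detect. A minimal sketch (not from the patch), assuming the usual import path github.com/go-logr/logr:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
)

func doWork(log logr.Logger) {
	if log.IsZero() {
		log = logr.Discard() // substitute whatever default logger is appropriate
	}
	log.Info("starting work") // safe even if the caller passed a zero value
}

func main() {
	var log logr.Logger // zero value: sink is nil
	doWork(log)
	fmt.Println(log.IsZero()) // true
}
```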
diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
index fb376fce2da1..f0610cf1e577 100644
--- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
+++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -26,11 +26,16 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
+//
+// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
+
+ u.RawPath = ""
+ u.RawFragment = ""
}
func lowercaseScheme(u *url.URL) {
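Clearing RawPath and RawFragment means the normalized URL re-encodes from the decoded Path and Fragment when rendered, so two encodings of the same reference render identically after normalization. A standard-library sketch (not from the patch) of the effect:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://example.com/a%2Fb/definitions")
	fmt.Println(u.Path, u.RawPath) // /a/b/definitions /a%2Fb/definitions
	fmt.Println(u.String())        // keeps the original encoding: .../a%2Fb/definitions

	u.RawPath = ""
	u.RawFragment = ""
	fmt.Println(u.String()) // re-encoded from Path: .../a/b/definitions
}
```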
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md
index 3072d24a9da6..f5d551ca8fd8 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -36,9 +36,23 @@ The part in the middle is the interesting bit. It's called the Claims and conta
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
## Examples
-See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
@@ -46,9 +60,17 @@ See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) fo
## Extensions
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards.
-Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers
## Compliance
@@ -112,3 +134,5 @@ This library uses descriptive error messages whenever possible. If you are not g
Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
new file mode 100644
index 000000000000..b08402c3427f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first, this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go
index 41cc826563ba..9d95cad2bf27 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -56,17 +56,17 @@ func (c RegisteredClaims) Valid() error {
// default value in Go, let's not fail the verification for them.
if !c.VerifyExpiresAt(now, false) {
delta := now.Sub(c.ExpiresAt.Time)
- vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
vErr.Errors |= ValidationErrorExpired
}
if !c.VerifyIssuedAt(now, false) {
- vErr.Inner = fmt.Errorf("token used before issued")
+ vErr.Inner = ErrTokenUsedBeforeIssued
vErr.Errors |= ValidationErrorIssuedAt
}
if !c.VerifyNotBefore(now, false) {
- vErr.Inner = fmt.Errorf("token is not valid yet")
+ vErr.Inner = ErrTokenNotValidYet
vErr.Errors |= ValidationErrorNotValidYet
}
@@ -149,17 +149,17 @@ func (c StandardClaims) Valid() error {
// default value in Go, let's not fail the verification for them.
if !c.VerifyExpiresAt(now, false) {
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
- vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
vErr.Errors |= ValidationErrorExpired
}
if !c.VerifyIssuedAt(now, false) {
- vErr.Inner = fmt.Errorf("token used before issued")
+ vErr.Inner = ErrTokenUsedBeforeIssued
vErr.Errors |= ValidationErrorIssuedAt
}
if !c.VerifyNotBefore(now, false) {
- vErr.Inner = fmt.Errorf("token is not valid yet")
+ vErr.Inner = ErrTokenNotValidYet
vErr.Errors |= ValidationErrorNotValidYet
}
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/errors.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/errors.go
index b9d18e498e3f..10ac8835cc88 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/errors.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/errors.go
@@ -9,6 +9,18 @@ var (
ErrInvalidKey = errors.New("key is invalid")
ErrInvalidKeyType = errors.New("key is of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
)
// The errors that might occur when parsing and validating a token
@@ -62,3 +74,39 @@ func (e *ValidationError) Unwrap() error {
func (e *ValidationError) valid() bool {
return e.Errors == 0
}
+
+// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message
+// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use
+// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
+func (e *ValidationError) Is(err error) bool {
+ // Check, if our inner error is a direct match
+ if errors.Is(errors.Unwrap(e), err) {
+ return true
+ }
+
+ // Otherwise, we need to match using our error flags
+ switch err {
+ case ErrTokenMalformed:
+ return e.Errors&ValidationErrorMalformed != 0
+ case ErrTokenUnverifiable:
+ return e.Errors&ValidationErrorUnverifiable != 0
+ case ErrTokenSignatureInvalid:
+ return e.Errors&ValidationErrorSignatureInvalid != 0
+ case ErrTokenInvalidAudience:
+ return e.Errors&ValidationErrorAudience != 0
+ case ErrTokenExpired:
+ return e.Errors&ValidationErrorExpired != 0
+ case ErrTokenUsedBeforeIssued:
+ return e.Errors&ValidationErrorIssuedAt != 0
+ case ErrTokenInvalidIssuer:
+ return e.Errors&ValidationErrorIssuer != 0
+ case ErrTokenNotValidYet:
+ return e.Errors&ValidationErrorNotValidYet != 0
+ case ErrTokenInvalidId:
+ return e.Errors&ValidationErrorId != 0
+ case ErrTokenInvalidClaims:
+ return e.Errors&ValidationErrorClaimsInvalid != 0
+ }
+
+ return false
+}
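The new Is method lets errors.Is classify a *ValidationError against the sentinel errors above, either through the wrapped inner error or through the bit flags. A minimal sketch (not from the patch), assuming the usual import path github.com/golang-jwt/jwt/v4:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("secret")

	// Sign a token that expired an hour ago.
	claims := jwt.RegisteredClaims{ExpiresAt: jwt.NewNumericDate(time.Now().Add(-time.Hour))}
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)

	_, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return key, nil })
	if errors.Is(err, jwt.ErrTokenExpired) {
		fmt.Println("token expired:", err)
	}
}
```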
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
index e7da633b93c6..2700d64a0d09 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -126,16 +126,19 @@ func (m MapClaims) Valid() error {
now := TimeFunc().Unix()
if !m.VerifyExpiresAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenExpired
vErr.Inner = errors.New("Token is expired")
vErr.Errors |= ValidationErrorExpired
}
if !m.VerifyIssuedAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
vErr.Inner = errors.New("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if !m.VerifyNotBefore(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenNotValidYet
vErr.Inner = errors.New("Token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
index 0fede4f15c90..6ea6f9527de6 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
@@ -1,6 +1,6 @@
package jwt
-// ParserOption is used to implement functional-style options that modify the behaviour of the parser. To add
+// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add
// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that
// takes a *Parser type as input and manipulates its configuration accordingly.
type ParserOption func(*Parser)
@@ -13,7 +13,7 @@ func WithValidMethods(methods []string) ParserOption {
}
}
-// WithJSONNumber is an option to configure the underyling JSON parser with UseNumber
+// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber
func WithJSONNumber() ParserOption {
return func(p *Parser) {
p.UseJSONNumber = true
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
index 5a8502feb34b..4fd6f9e610b0 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -1,3 +1,4 @@
+//go:build go1.4
// +build go1.4
package jwt
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go
index 12344138bedc..3cb0f3f0e4c0 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -7,7 +7,6 @@ import (
"time"
)
-
// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515
// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
@@ -74,22 +73,19 @@ func (t *Token) SignedString(key interface{}) (string, error) {
// the SignedString.
func (t *Token) SigningString() (string, error) {
var err error
- parts := make([]string, 2)
- for i := range parts {
- var jsonValue []byte
- if i == 0 {
- if jsonValue, err = json.Marshal(t.Header); err != nil {
- return "", err
- }
- } else {
- if jsonValue, err = json.Marshal(t.Claims); err != nil {
- return "", err
- }
- }
+ var jsonValue []byte
- parts[i] = EncodeSegment(jsonValue)
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
}
- return strings.Join(parts, "."), nil
+ header := EncodeSegment(jsonValue)
+
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ claim := EncodeSegment(jsonValue)
+
+ return strings.Join([]string{header, claim}, "."), nil
}
// Parse parses, validates, verifies the signature and returns the parsed token.
diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/types.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/types.go
index 80b1b96948ec..ac8e140eb119 100644
--- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/types.go
+++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -49,9 +49,27 @@ func newNumericDateFromSeconds(f float64) *NumericDate {
// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
func (date NumericDate) MarshalJSON() (b []byte, err error) {
- f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second)
-
- return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get round the issue:
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
}
// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a
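The rewritten MarshalJSON avoids pushing the timestamp through UnixNano, which overflows int64 for dates after the year 2262, and instead concatenates the integer seconds with a sub-second fraction whose width follows TimePrecision. A standalone sketch (not from the patch) of the problem and the workaround, using a hypothetical millisecond precision:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	precision := time.Millisecond // stand-in for a caller-configured TimePrecision
	t := time.Date(2500, 1, 1, 0, 0, 0, 123_000_000, time.UTC).Truncate(precision)

	// Old approach: UnixNano overflows int64 this far in the future, so the
	// resulting float is wrong.
	old := float64(t.UnixNano()) / float64(time.Second)
	fmt.Println(strconv.FormatFloat(old, 'f', -1, 64))

	// New approach: integer seconds plus the sub-second offset; no overflow.
	prec := 3 // the library derives this from TimePrecision (3 digits for milliseconds)
	secs := strconv.FormatInt(t.Unix(), 10)
	frac := strconv.FormatFloat(float64(t.Nanosecond())/float64(time.Second), 'f', prec, 64)
	fmt.Println(secs + frac[1:]) // 16725225600.123
}
```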
diff --git a/cluster-autoscaler/vendor/github.com/golang/protobuf/jsonpb/decode.go b/cluster-autoscaler/vendor/github.com/golang/protobuf/jsonpb/decode.go
index 60e82caa9a2d..6c16c255ffba 100644
--- a/cluster-autoscaler/vendor/github.com/golang/protobuf/jsonpb/decode.go
+++ b/cluster-autoscaler/vendor/github.com/golang/protobuf/jsonpb/decode.go
@@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+ if fd.Cardinality() == protoreflect.Repeated {
+ return false
+ }
if md := fd.Message(); md != nil {
- return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+ return md.FullName() == "google.protobuf.Value"
+ }
+ if ed := fd.Enum(); ed != nil {
+ return ed.FullName() == "google.protobuf.NullValue"
}
return false
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/aws/aws.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/aws/aws.go
deleted file mode 100644
index 01aee36dd49e..000000000000
--- a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/aws/aws.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudinfo
-
-import (
- "io/ioutil"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/session"
-
- info "github.com/google/cadvisor/info/v1"
- "github.com/google/cadvisor/utils/cloudinfo"
-)
-
-const (
- productVerFileName = "/sys/class/dmi/id/product_version"
- biosVerFileName = "/sys/class/dmi/id/bios_vendor"
- systemdOSReleaseFileName = "/etc/os-release"
- amazon = "amazon"
-)
-
-func init() {
- cloudinfo.RegisterCloudProvider(info.AWS, &provider{})
-}
-
-type provider struct{}
-
-var _ cloudinfo.CloudProvider = provider{}
-
-func (provider) IsActiveProvider() bool {
- return fileContainsAmazonIdentifier(productVerFileName) ||
- fileContainsAmazonIdentifier(biosVerFileName) ||
- fileContainsAmazonIdentifier(systemdOSReleaseFileName)
-}
-
-func fileContainsAmazonIdentifier(filename string) bool {
- fileContent, err := ioutil.ReadFile(filename)
- if err != nil {
- return false
- }
-
- return strings.Contains(string(fileContent), amazon)
-}
-
-func getAwsMetadata(name string) string {
- sess, err := session.NewSession(&aws.Config{})
- if err != nil {
- return info.UnknownInstance
- }
- client := ec2metadata.New(sess)
- data, err := client.GetMetadata(name)
- if err != nil {
- return info.UnknownInstance
- }
- return data
-}
-
-func (provider) GetInstanceType() info.InstanceType {
- return info.InstanceType(getAwsMetadata("instance-type"))
-}
-
-func (provider) GetInstanceID() info.InstanceID {
- return info.InstanceID(getAwsMetadata("instance-id"))
-}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel
index e973abfc547a..ddddbd2804e2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -23,6 +23,7 @@ go_library(
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/containers:go_default_library",
+ "//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/decls.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/decls.go
index f2df721d0766..c0624d1e5963 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/decls.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/decls.go
@@ -139,7 +139,7 @@ var (
kind: TypeKind,
runtimeType: types.TypeType,
}
- //UintType represents a uint type.
+ // UintType represents a uint type.
UintType = &Type{
kind: UintKind,
runtimeType: types.UintType,
@@ -222,7 +222,8 @@ func (t *Type) equals(other *Type) bool {
// - The from types are the same instance
// - The target type is dynamic
// - The fromType has the same kind and type name as the target type, and all parameters of the target type
-// are IsAssignableType() from the parameters of the fromType.
+//
+// are IsAssignableType() from the parameters of the fromType.
func (t *Type) defaultIsAssignableType(fromType *Type) bool {
if t == fromType || t.isDyn() {
return true
@@ -312,6 +313,11 @@ func NullableType(wrapped *Type) *Type {
}
}
+// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func OptionalType(param *Type) *Type {
+ return OpaqueType("optional", param)
+}
+
// OpaqueType creates an abstract parameterized type with a given name.
func OpaqueType(name string, params ...*Type) *Type {
return &Type{
@@ -365,7 +371,9 @@ func Variable(name string, t *Type) EnvOption {
//
// - Overloads are searched in the order they are declared
// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
-// at runtime. Empty lists and maps will result in a 'default dispatch'
+//
+// at runtime. Empty lists and maps will result in a 'default dispatch'
+//
// - In the event that a default dispatch occurs, the first overload provided is the one invoked
//
// If you intend to use overloads which differentiate based on the key or element type of a list or
@@ -405,7 +413,7 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
// FunctionOpt defines a functional option for configuring a function declaration.
type FunctionOpt func(*functionDecl) (*functionDecl, error)
-// SingletonUnaryBinding creates a singleton function defintion to be used for all function overloads.
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
@@ -431,7 +439,17 @@ func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonBinaryBinding
func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
@@ -453,7 +471,17 @@ func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonFunctionBinding
func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
@@ -720,9 +748,8 @@ func (f *functionDecl) addOverload(overload *overloadDecl) error {
// Allow redefinition of an overload implementation so long as the signatures match.
f.overloads[index] = overload
return nil
- } else {
- return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
}
+ return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
}
}
f.overloads = append(f.overloads, overload)
@@ -1177,3 +1204,43 @@ func collectParamNames(paramNames map[string]struct{}, arg *Type) {
collectParamNames(paramNames, param)
}
}
+
+func typeValueToKind(tv *types.TypeValue) (Kind, error) {
+ switch tv {
+ case types.BoolType:
+ return BoolKind, nil
+ case types.DoubleType:
+ return DoubleKind, nil
+ case types.IntType:
+ return IntKind, nil
+ case types.UintType:
+ return UintKind, nil
+ case types.ListType:
+ return ListKind, nil
+ case types.MapType:
+ return MapKind, nil
+ case types.StringType:
+ return StringKind, nil
+ case types.BytesType:
+ return BytesKind, nil
+ case types.DurationType:
+ return DurationKind, nil
+ case types.TimestampType:
+ return TimestampKind, nil
+ case types.NullType:
+ return NullTypeKind, nil
+ case types.TypeType:
+ return TypeKind, nil
+ default:
+ switch tv.TypeName() {
+ case "dyn":
+ return DynKind, nil
+ case "google.protobuf.Any":
+ return AnyKind, nil
+ case "optional":
+ return OpaqueKind, nil
+ default:
+ return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
+ }
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go
index 4e9ecdd648ac..8cf442ee714b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go
@@ -102,9 +102,11 @@ type Env struct {
provider ref.TypeProvider
features map[int]bool
appliedFeatures map[int]bool
+ libraries map[string]bool
// Internal parser representation
- prsr *parser.Parser
+ prsr *parser.Parser
+ prsrOpts []parser.Option
// Internal checker representation
chk *checker.Env
@@ -159,6 +161,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
provider: registry,
features: map[int]bool{},
appliedFeatures: map[int]bool{},
+ libraries: map[string]bool{},
progOpts: []ProgramOption{},
}).configure(opts)
}
@@ -240,6 +243,9 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
return nil, e.chkErr
}
+ prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+ copy(prsrOptsCopy, e.prsrOpts)
+
// The type-checker is configured with Declarations. The declarations may either be provided
// as options which have not yet been validated, or may come from a previous checker instance
// whose types have already been validated.
@@ -250,7 +256,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
decsCopy := []*exprpb.Decl{}
if e.chk != nil {
// If the type-checker has already been instantiated, then the e.declarations have been
- // valdiated within the chk instance.
+ // validated within the chk instance.
chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk))
} else {
// If the type-checker has not been instantiated, ensure the unvalidated declarations are
@@ -304,8 +310,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.functions {
funcsCopy[k] = v
}
+ libsCopy := make(map[string]bool, len(e.libraries))
+ for k, v := range e.libraries {
+ libsCopy[k] = v
+ }
- // TODO: functions copy needs to happen here.
ext := &Env{
Container: e.Container,
declarations: decsCopy,
@@ -315,8 +324,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
adapter: adapter,
features: featuresCopy,
appliedFeatures: appliedFeaturesCopy,
+ libraries: libsCopy,
provider: provider,
chkOpts: chkOptsCopy,
+ prsrOpts: prsrOptsCopy,
}
return ext.configure(opts)
}
@@ -328,6 +339,12 @@ func (e *Env) HasFeature(flag int) bool {
return has && enabled
}
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+ configured, exists := e.libraries[libName]
+ return exists && configured
+}
+
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
//
// This form of Parse creates a Source value for the input `txt` and forwards to the
@@ -422,8 +439,8 @@ func (e *Env) UnknownVars() interpreter.PartialActivation {
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
- pruned := interpreter.PruneAst(a.Expr(), details.State())
- expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
+ pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
+ expr, err := AstToString(ParsedExprToAst(pruned))
if err != nil {
return nil, err
}
@@ -464,17 +481,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
}
// If the default UTC timezone fix has been enabled, make sure the library is configured
- if e.HasFeature(featureDefaultUTCTimeZone) {
- if _, found := e.appliedFeatures[featureDefaultUTCTimeZone]; !found {
- e, err = Lib(timeUTCLibrary{})(e)
- if err != nil {
- return nil, err
- }
- // record that the feature has been applied since it will generate declarations
- // and functions which will be propagated on Extend() calls and which should only
- // be registered once.
- e.appliedFeatures[featureDefaultUTCTimeZone] = true
- }
+ e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+ if err != nil {
+ return nil, err
}
// Initialize all of the functions configured within the environment.
@@ -486,7 +495,10 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
}
// Configure the parser.
- prsrOpts := []parser.Option{parser.Macros(e.macros...)}
+ prsrOpts := []parser.Option{}
+ prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
if e.HasFeature(featureEnableMacroCallTracking) {
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
}
@@ -546,6 +558,27 @@ func (e *Env) initChecker() error {
return e.chkErr
}
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the feature if it has not already been enabled.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+ if !e.HasFeature(feature) {
+ return e, nil
+ }
+ _, applied := e.appliedFeatures[feature]
+ if applied {
+ return e, nil
+ }
+ e, err := option(e)
+ if err != nil {
+ return nil, err
+ }
+ // record that the feature has been applied since it will generate declarations
+ // and functions which will be propagated on Extend() calls and which should only
+ // be registered once.
+ e.appliedFeatures[feature] = true
+ return e, nil
+}
+
// Issues defines methods for inspecting the error details of parse and check calls.
//
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
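With the libraries map and HasLibrary in place, Lib() (shown in library.go below) can skip re-applying a library that implements the new SingletonLibrary interface. A minimal sketch (not from the patch), assuming the usual import path github.com/google/cel-go/cel; the library name and variable are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

// myLib is a hypothetical example library.
type myLib struct{}

func (myLib) LibraryName() string { return "example.lib.my" }

func (myLib) CompileOptions() []cel.EnvOption {
	return []cel.EnvOption{cel.Variable("greeting", cel.StringType)}
}

func (myLib) ProgramOptions() []cel.ProgramOption { return nil }

func main() {
	// Passing the library twice is harmless: the second Lib() call sees
	// HasLibrary("example.lib.my") == true and returns the env unchanged.
	env, err := cel.NewEnv(cel.Lib(myLib{}), cel.Lib(myLib{}))
	if err != nil {
		panic(err)
	}
	fmt.Println(env.HasLibrary("example.lib.my")) // true
}
```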
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/io.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/io.go
index e721c97f6671..93ded3cf1b7e 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/io.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/io.go
@@ -19,14 +19,14 @@ import (
"fmt"
"reflect"
+ "google.golang.org/protobuf/proto"
+
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/parser"
- "google.golang.org/protobuf/proto"
-
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
anypb "google.golang.org/protobuf/types/known/anypb"
)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go
index 5ca528459a8c..072cec30e6ff 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go
@@ -20,10 +20,14 @@ import (
"time"
"github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
)
// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
@@ -42,10 +46,27 @@ type Library interface {
ProgramOptions() []ProgramOption
}
+// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
+// configured once within the environment.
+type SingletonLibrary interface {
+ Library
+
+ // LibraryName provides a namespaced name which is used to check whether the library has already
+ // been configured in the environment.
+ LibraryName() string
+}
+
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
// and to be linked to each other.
func Lib(l Library) EnvOption {
+ singleton, isSingleton := l.(SingletonLibrary)
return func(e *Env) (*Env, error) {
+ if isSingleton {
+ if e.HasLibrary(singleton.LibraryName()) {
+ return e, nil
+ }
+ e.libraries[singleton.LibraryName()] = true
+ }
var err error
for _, opt := range l.CompileOptions() {
e, err = opt(e)
@@ -67,6 +88,11 @@ func StdLib() EnvOption {
// features documented in the specification.
type stdLibrary struct{}
+// LibraryName implements the SingletonLibrary interface method.
+func (stdLibrary) LibraryName() string {
+ return "cel.lib.std"
+}
+
// EnvOptions returns options for the standard CEL function declarations and macros.
func (stdLibrary) CompileOptions() []EnvOption {
return []EnvOption{
@@ -82,6 +108,191 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
}
}
+type optionalLibrary struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (optionalLibrary) LibraryName() string {
+ return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (optionalLibrary) CompileOptions() []EnvOption {
+ paramTypeK := TypeParamType("K")
+ paramTypeV := TypeParamType("V")
+ optionalTypeV := OptionalType(paramTypeV)
+ listTypeV := ListType(paramTypeV)
+ mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+ return []EnvOption{
+ // Enable the optional syntax in the parser.
+ enableOptionalSyntax(),
+
+ // Introduce the optional type.
+ Types(types.OptionalType),
+
+ // Global and member functions for working with optional values.
+ Function("optional.of",
+ Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ return types.OptionalOf(value)
+ }))),
+ Function("optional.ofNonZeroValue",
+ Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ v, isZeroer := value.(traits.Zeroer)
+ if !isZeroer || !v.IsZeroValue() {
+ return types.OptionalOf(value)
+ }
+ return types.OptionalNone
+ }))),
+ Function("optional.none",
+ Overload("optional_none", []*Type{}, optionalTypeV,
+ FunctionBinding(func(values ...ref.Val) ref.Val {
+ return types.OptionalNone
+ }))),
+ Function("value",
+ MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return opt.GetValue()
+ }))),
+ Function("hasValue",
+ MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return types.Bool(opt.HasValue())
+ }))),
+
+ // Implementation of 'or' and 'orValue' are special-cased to support short-circuiting in the
+ // evaluation chain.
+ Function("or",
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ Function("orValue",
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+ // OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+		// output type.
+ Function(operators.OptSelect,
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+ // OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+ // these signatures to determine type-agreement without any special handling.
+ Function(operators.OptIndex,
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+ // Index overloads to accommodate using an optional value as the operand.
+ Function(operators.Index,
+ Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (optionalLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{
+ CustomDecorator(decorateOptionalOr),
+ }
+}
+
+func enableOptionalSyntax() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
+ return e, nil
+ }
+}
+
+func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return i, nil
+ }
+ switch call.Function() {
+ case "or":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
+ return i, nil
+ }
+ return &evalOptionalOr{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ case "orValue":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
+ return i, nil
+ }
+ return &evalOptionalOrValue{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ default:
+ return i, nil
+ }
+}
+
+// evalOptionalOr selects between two optional values: the first is returned if it has a value;
+// otherwise, the second optional expression is evaluated and its result returned.
+type evalOptionalOr struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOr) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+// evalOptionalOrValue selects between an optional or a concrete value. If the optional has a value,
+// its value is returned, otherwise the alternative value expression is evaluated and returned.
+type evalOptionalOrValue struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOrValue) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal.GetValue()
+ }
+ return opt.rhs.Eval(ctx)
+}
+
type timeUTCLibrary struct{}
func (timeUTCLibrary) CompileOptions() []EnvOption {
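Note: the SingletonLibrary hook introduced above lets cel.Lib skip libraries that are already configured on the Env. A minimal sketch of a user-defined library opting into this behavior follows; the library name "example.lib.hello", the "greeting" variable, and the use of cel.Variable are illustrative assumptions, not part of this change.

package main

import "github.com/google/cel-go/cel"

type helloLib struct{}

// LibraryName is what cel.Lib uses to detect that the library is already installed.
func (helloLib) LibraryName() string { return "example.lib.hello" }

// CompileOptions declares the environment additions this library contributes.
func (helloLib) CompileOptions() []cel.EnvOption {
	return []cel.EnvOption{cel.Variable("greeting", cel.StringType)}
}

// ProgramOptions contributes no runtime options in this sketch.
func (helloLib) ProgramOptions() []cel.ProgramOption { return nil }

func main() {
	// Registering the library twice configures it only once, thanks to LibraryName.
	env, err := cel.NewEnv(cel.Lib(helloLib{}), cel.Lib(helloLib{}))
	if err != nil {
		panic(err)
	}
	_ = env
}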
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/macro.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/macro.go
index e43cb4eeea1b..e48c5bf8eedd 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/macro.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/macro.go
@@ -17,6 +17,7 @@ package cel
import (
"github.com/google/cel-go/common"
"github.com/google/cel-go/parser"
+
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
@@ -26,8 +27,11 @@ import (
// a Macro should be created per arg-count or as a var arg macro.
type Macro = parser.Macro
-// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
-// if the input arguments are not suitable for the expansion requirements for the macro in question.
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
//
// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
// and produces as output an Expr ast node.
@@ -81,8 +85,10 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
// input to produce an output list.
//
// There are two call patterns supported by map:
-// .map(, )
-// .map(, , )
+//
+// .map(, )
+// .map(, , )
+//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go
index 21c757010616..63321b548170 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go
@@ -29,6 +29,7 @@ import (
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
descpb "google.golang.org/protobuf/types/descriptorpb"
@@ -61,6 +62,10 @@ const (
// on a CEL timestamp operation. This fixes the scenario where the input time
// is not already in UTC.
featureDefaultUTCTimeZone
+
+ // Enable the use of optional types in the syntax, type-system, type-checking,
+ // and runtime.
+ featureOptionalTypes
)
// EnvOption is a functional interface for configuring the environment.
@@ -163,19 +168,19 @@ func Container(name string) EnvOption {
// Abbreviations can be useful when working with variables, functions, and especially types from
// multiple namespaces:
//
-// // CEL object construction
-// qual.pkg.version.ObjTypeName{
-// field: alt.container.ver.FieldTypeName{value: ...}
-// }
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
//
// Only one the qualified names above may be used as the CEL container, so at least one of these
// references must be a long qualified name within an otherwise short CEL program. Using the
// following abbreviations, the program becomes much simpler:
//
-// // CEL Go option
-// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
-// // Simplified Object construction
-// ObjTypeName{field: FieldTypeName{value: ...}}
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
//
// There are a few rules for the qualified names and the simple abbreviations generated from them:
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
@@ -188,9 +193,12 @@ func Container(name string) EnvOption {
// - Expanded abbreviations do not participate in namespace resolution.
// - Abbreviation expansion is done instead of the container search for a matching identifier.
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
-// to the least qualified name.
+//
+// to the least qualified name.
+//
// - Container references within the CEL program may be relative, and are resolved to fully
-// qualified names at either type-check time or program plan time, whichever comes first.
+//
+// qualified names at either type-check time or program plan time, whichever comes first.
//
// If there is ever a case where an identifier could be in both the container and as an
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
@@ -216,7 +224,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
// environment by default.
//
// Note: This option must be specified after the CustomTypeProvider option when used together.
-func Types(addTypes ...interface{}) EnvOption {
+func Types(addTypes ...any) EnvOption {
return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry)
if !isReg {
@@ -253,7 +261,7 @@ func Types(addTypes ...interface{}) EnvOption {
//
// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
// extension or by re-using the same EnvOption with another NewEnv() call.
-func TypeDescs(descs ...interface{}) EnvOption {
+func TypeDescs(descs ...any) EnvOption {
return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry)
if !isReg {
@@ -350,8 +358,8 @@ func Functions(funcs ...*functions.Overload) ProgramOption {
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
//
-// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
-func Globals(vars interface{}) ProgramOption {
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
return func(p *prog) (*prog, error) {
defaultVars, err := interpreter.NewActivation(vars)
if err != nil {
@@ -404,6 +412,9 @@ const (
// OptTrackCost enables the runtime cost calculation while validation and return cost within evalDetails
// cost calculation is available via func ActualCost()
OptTrackCost EvalOption = 1 << iota
+
+ // OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality.
+ OptCheckStringFormat EvalOption = 1 << iota
)
// EvalOptions sets one or more evaluation options which may affect the evaluation or Result.
@@ -534,6 +545,13 @@ func DefaultUTCTimeZone(enabled bool) EnvOption {
return features(featureDefaultUTCTimeZone, enabled)
}
+// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes
+// it possible to express whether variables have been provided, whether a result has been computed,
+// and in the future whether an object field path, map key value, or list index has a value.
+func OptionalTypes() EnvOption {
+ return Lib(optionalLibrary{})
+}
+
// features sets the given feature flags. See list of Feature constants above.
func features(flag int, enabled bool) EnvOption {
return func(e *Env) (*Env, error) {
@@ -541,3 +559,12 @@ func features(flag int, enabled bool) EnvOption {
return e, nil
}
}
+
+// ParserRecursionLimit adjusts the AST depth the parser will tolerate.
+// Defaults defined in the parser package.
+func ParserRecursionLimit(limit int) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.MaxRecursionDepth(limit))
+ return e, nil
+ }
+}
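A hedged sketch of how the OptionalTypes and ParserRecursionLimit options added above might be combined; the "req" variable, its map type, and the expression are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.OptionalTypes(),          // optional syntax, types, and runtime support
		cel.ParserRecursionLimit(32), // cap the AST depth the parser will accept
		cel.Variable("req", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`req.?region.orValue("us-east-1")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	// No "region" key: the optional selection is none() and orValue supplies the default.
	out, _, _ := prg.Eval(map[string]any{"req": map[string]string{}})
	fmt.Println(out) // us-east-1
}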
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go
index 6219a4da588e..a630f5bfa1f2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/program.go
@@ -17,21 +17,20 @@ package cel
import (
"context"
"fmt"
- "math"
"sync"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Program is an evaluable view of an Ast.
type Program interface {
// Eval returns the result of an evaluation of the Ast and environment against the input vars.
//
- // The vars value may either be an `interpreter.Activation` or a `map[string]interface{}`.
+ // The vars value may either be an `interpreter.Activation` or a `map[string]any`.
//
// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
@@ -43,16 +42,16 @@ type Program interface {
// An unsuccessful evaluation is typically the result of a series of incompatible `EnvOption`
// or `ProgramOption` values used in the creation of the evaluation environment or executable
// program.
- Eval(interface{}) (ref.Val, *EvalDetails, error)
+ Eval(any) (ref.Val, *EvalDetails, error)
// ContextEval evaluates the program with a set of input variables and a context object in order
// to support cancellation and timeouts. This method must be used in conjunction with the
// InterruptCheckFrequency() option for cancellation interrupts to be impact evaluation.
//
- // The vars value may either be an `interpreter.Activation` or `map[string]interface{}`.
+ // The vars value may either be an `interpreter.Activation` or `map[string]any`.
//
// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
- ContextEval(context.Context, interface{}) (ref.Val, *EvalDetails, error)
+ ContextEval(context.Context, any) (ref.Val, *EvalDetails, error)
}
// NoVars returns an empty Activation.
@@ -65,7 +64,7 @@ func NoVars() interpreter.Activation {
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
-func PartialVars(vars interface{},
+func PartialVars(vars any,
unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
return interpreter.NewPartialActivation(vars, unknowns...)
}
@@ -207,6 +206,37 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
if len(p.regexOptimizations) > 0 {
decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
}
+ // Enable compile-time checking of syntax/cardinality for string.format calls.
+ if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
+ var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error)
+ if ast.IsChecked() {
+ isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
+ t, err := ExprTypeToType(ast.typeMap[id])
+ if err != nil {
+ return false, err
+ }
+ if t.kind == DynKind {
+ return true, nil
+ }
+ for _, vt := range validTypes {
+ k, err := typeValueToKind(vt)
+ if err != nil {
+ return false, err
+ }
+ if k == t.kind {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+ } else {
+ // if the AST isn't type-checked, short-circuit validation
+ isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
+ return true, nil
+ }
+ }
+ decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
+ }
// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
@@ -268,7 +298,7 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor
}
// Eval implements the Program interface method.
-func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
// Configure error recovery for unexpected panics during evaluation. Note, the use of named
// return values makes it possible to modify the error response during the recovery
// function.
@@ -287,11 +317,11 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
switch v := input.(type) {
case interpreter.Activation:
vars = v
- case map[string]interface{}:
+ case map[string]any:
vars = activationPool.Setup(v)
defer activationPool.Put(vars)
default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
}
if p.defaultVars != nil {
vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
@@ -307,7 +337,7 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
}
// ContextEval implements the Program interface.
-func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("context can not be nil")
}
@@ -318,22 +348,17 @@ func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *Ev
case interpreter.Activation:
vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
- case map[string]interface{}:
+ case map[string]any:
rawVars := activationPool.Setup(v)
defer activationPool.Put(rawVars)
vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
}
return p.Eval(vars)
}
-// Cost implements the Coster interface method.
-func (p *prog) Cost() (min, max int64) {
- return estimateCost(p.interpretable)
-}
-
// progFactory is a helper alias for marking a program creation factory function.
type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
@@ -354,7 +379,7 @@ func newProgGen(factory progFactory) (Program, error) {
}
// Eval implements the Program interface method.
-func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
// The factory based Eval() differs from the standard evaluation model in that it generates a
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
@@ -379,7 +404,7 @@ func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
}
// ContextEval implements the Program interface method.
-func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("context can not be nil")
}
@@ -406,29 +431,6 @@ func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val
return v, det, nil
}
-// Cost implements the Coster interface method.
-func (gen *progGen) Cost() (min, max int64) {
- // Use an empty state value since no evaluation is performed.
- p, err := gen.factory(emptyEvalState, nil)
- if err != nil {
- return 0, math.MaxInt64
- }
- return estimateCost(p)
-}
-
-// EstimateCost returns the heuristic cost interval for the program.
-func EstimateCost(p Program) (min, max int64) {
- return estimateCost(p)
-}
-
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(interpreter.Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
-}
-
type ctxEvalActivation struct {
parent interpreter.Activation
interrupt <-chan struct{}
@@ -438,7 +440,7 @@ type ctxEvalActivation struct {
// ResolveName implements the Activation interface method, but adds a special #interrupted variable
// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
-func (a *ctxEvalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
if name == "#interrupted" {
a.interruptCheckCount++
if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
@@ -461,7 +463,7 @@ func (a *ctxEvalActivation) Parent() interpreter.Activation {
func newCtxEvalActivationPool() *ctxEvalActivationPool {
return &ctxEvalActivationPool{
Pool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &ctxEvalActivation{}
},
},
@@ -483,21 +485,21 @@ func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan s
}
type evalActivation struct {
- vars map[string]interface{}
- lazyVars map[string]interface{}
+ vars map[string]any
+ lazyVars map[string]any
}
// ResolveName looks up the value of the input variable name, if found.
//
// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
+// - func() any
// - func() ref.Val
//
// The lazy binding will only be invoked once per evaluation.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
-func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *evalActivation) ResolveName(name string) (any, bool) {
v, found := a.vars[name]
if !found {
return nil, false
@@ -510,7 +512,7 @@ func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
lazy := obj()
a.lazyVars[name] = lazy
return lazy, true
- case func() interface{}:
+ case func() any:
if resolved, found := a.lazyVars[name]; found {
return resolved, true
}
@@ -530,8 +532,8 @@ func (a *evalActivation) Parent() interpreter.Activation {
func newEvalActivationPool() *evalActivationPool {
return &evalActivationPool{
Pool: sync.Pool{
- New: func() interface{} {
- return &evalActivation{lazyVars: make(map[string]interface{})}
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
},
},
}
@@ -542,13 +544,13 @@ type evalActivationPool struct {
}
// Setup initializes a pooled Activation object with the map input.
-func (p *evalActivationPool) Setup(vars map[string]interface{}) *evalActivation {
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
a := p.Pool.Get().(*evalActivation)
a.vars = vars
return a
}
-func (p *evalActivationPool) Put(value interface{}) {
+func (p *evalActivationPool) Put(value any) {
a := value.(*evalActivation)
for k := range a.lazyVars {
delete(a.lazyVars, k)
@@ -559,7 +561,7 @@ func (p *evalActivationPool) Put(value interface{}) {
var (
emptyEvalState = interpreter.NewEvalState()
- // activationPool is an internally managed pool of Activation values that wrap map[string]interface{} inputs
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
activationPool = newEvalActivationPool()
// ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
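As the evalActivation documentation above describes, map-based inputs may carry lazy bindings that are resolved at most once per evaluation. A brief sketch, where the "now" variable and the expression are illustrative assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("now", cel.TimestampType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`now.getFullYear() >= 2023`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, _ := prg.Eval(map[string]any{
		// Lazy binding: invoked at most once per Eval call by the pooled activation.
		"now": func() ref.Val { return types.Timestamp{Time: time.Now()} },
	})
	fmt.Println(out) // true (on any date in 2023 or later)
}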
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/checker.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/checker.go
index fcddb1b2c28b..257cffecf66b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/checker.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/checker.go
@@ -23,6 +23,7 @@ import (
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/proto"
@@ -173,8 +174,8 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Rewrite the node to be a variable reference to the resolved fully-qualified
// variable name.
- c.setType(e, ident.GetIdent().Type)
- c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
+ c.setType(e, ident.GetIdent().GetType())
+ c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
identName := ident.GetName()
e.ExprKind = &exprpb.Expr_IdentExpr{
IdentExpr: &exprpb.Expr_Ident{
@@ -185,9 +186,37 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
}
}
+ resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
+ if sel.TestOnly {
+ resultType = decls.Bool
+ }
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkOptSelect(e *exprpb.Expr) {
+ // Collect metadata related to the opt select call packaged by the parser.
+ call := e.GetCallExpr()
+ operand := call.GetArgs()[0]
+ field := call.GetArgs()[1]
+ fieldName, isString := maybeUnwrapString(field)
+ if !isString {
+ c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field)
+ return
+ }
+
+ // Perform type-checking using the field selection logic.
+ resultType := c.checkSelectField(e, operand, fieldName, true)
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type {
// Interpret as field selection, first traversing down the operand.
- c.check(sel.GetOperand())
- targetType := substitute(c.mappings, c.getType(sel.GetOperand()), false)
+ c.check(operand)
+ operandType := substitute(c.mappings, c.getType(operand), false)
+
+ // If the target type is 'optional', unwrap it for the sake of this check.
+ targetType, isOpt := maybeUnwrapOptional(operandType)
+
// Assume error type by default as most types do not support field selection.
resultType := decls.Error
switch kindOf(targetType) {
@@ -199,7 +228,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Objects yield their field type declaration as the selection result type, but only if
// the field is defined.
messageType := targetType
- if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), sel.GetField()); found {
+ if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found {
resultType = fieldType.Type
}
case kindTypeParam:
@@ -212,16 +241,17 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
default:
// Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check.
- if isDynOrError(targetType) {
- resultType = decls.Dyn
- } else {
+ if !isDynOrError(targetType) {
c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
}
+ resultType = decls.Dyn
}
- if sel.TestOnly {
- resultType = decls.Bool
+
+ // If the target type was optional coming in, then the result must be optional going out.
+ if isOpt || optional {
+ return decls.NewOptionalType(resultType)
}
- c.setType(e, substitute(c.mappings, resultType, false))
+ return resultType
}
func (c *checker) checkCall(e *exprpb.Expr) {
@@ -229,15 +259,19 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// please consider the impact on planner.go and consolidate implementations or mirror code
// as appropriate.
call := e.GetCallExpr()
- target := call.GetTarget()
- args := call.GetArgs()
fnName := call.GetFunction()
+ if fnName == operators.OptSelect {
+ c.checkOptSelect(e)
+ return
+ }
+ args := call.GetArgs()
// Traverse arguments.
for _, arg := range args {
c.check(arg)
}
+ target := call.GetTarget()
// Regular static call with simple name.
if target == nil {
// Check for the existence of the function.
@@ -359,6 +393,9 @@ func (c *checker) resolveOverload(
}
if resultType == nil {
+ for i, arg := range argTypes {
+ argTypes[i] = substitute(c.mappings, arg, true)
+ }
c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
resultType = decls.Error
return nil
@@ -369,16 +406,29 @@ func (c *checker) resolveOverload(
func (c *checker) checkCreateList(e *exprpb.Expr) {
create := e.GetListExpr()
- var elemType *exprpb.Type
- for _, e := range create.GetElements() {
+ var elemsType *exprpb.Type
+ optionalIndices := create.GetOptionalIndices()
+ optionals := make(map[int32]bool, len(optionalIndices))
+ for _, optInd := range optionalIndices {
+ optionals[optInd] = true
+ }
+ for i, e := range create.GetElements() {
c.check(e)
- elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
+ elemType := c.getType(e)
+ if optionals[int32(i)] {
+ var isOptional bool
+ elemType, isOptional = maybeUnwrapOptional(elemType)
+ if !isOptional && !isDyn(elemType) {
+ c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType)
+ }
+ }
+ elemsType = c.joinTypes(c.location(e), elemsType, elemType)
}
- if elemType == nil {
+ if elemsType == nil {
// If the list is empty, assign free type var to elem type.
- elemType = c.newTypeVar()
+ elemsType = c.newTypeVar()
}
- c.setType(e, decls.NewListType(elemType))
+ c.setType(e, decls.NewListType(elemsType))
}
func (c *checker) checkCreateStruct(e *exprpb.Expr) {
@@ -392,22 +442,31 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) {
func (c *checker) checkCreateMap(e *exprpb.Expr) {
mapVal := e.GetStructExpr()
- var keyType *exprpb.Type
- var valueType *exprpb.Type
+ var mapKeyType *exprpb.Type
+ var mapValueType *exprpb.Type
for _, ent := range mapVal.GetEntries() {
key := ent.GetMapKey()
c.check(key)
- keyType = c.joinTypes(c.location(key), keyType, c.getType(key))
-
- c.check(ent.Value)
- valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
+ mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key))
+
+ val := ent.GetValue()
+ c.check(val)
+ valType := c.getType(val)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType)
+ }
+ }
+ mapValueType = c.joinTypes(c.location(val), mapValueType, valType)
}
- if keyType == nil {
+ if mapKeyType == nil {
// If the map is empty, assign free type variables to typeKey and value type.
- keyType = c.newTypeVar()
- valueType = c.newTypeVar()
+ mapKeyType = c.newTypeVar()
+ mapValueType = c.newTypeVar()
}
- c.setType(e, decls.NewMapType(keyType, valueType))
+ c.setType(e, decls.NewMapType(mapKeyType, mapValueType))
}
func (c *checker) checkCreateMessage(e *exprpb.Expr) {
@@ -449,15 +508,21 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
c.check(value)
fieldType := decls.Error
- if t, found := c.lookupFieldType(
- c.locationByID(ent.GetId()),
- messageType.GetMessageType(),
- field); found {
- fieldType = t.Type
+ ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field)
+ if found {
+ fieldType = ft.Type
+ }
+
+ valType := c.getType(value)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType)
+ }
}
- if !c.isAssignable(fieldType, c.getType(value)) {
- c.errors.fieldTypeMismatch(
- c.locationByID(ent.Id), field, fieldType, c.getType(value))
+ if !c.isAssignable(fieldType, valType) {
+ c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType)
}
}
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go
index 7312d1fe2f85..6cf8c4fea095 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go
@@ -92,7 +92,10 @@ func (e astNode) ComputedSize() *SizeEstimate {
case *exprpb.Expr_ConstExpr:
switch ck := ek.ConstExpr.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
- v = uint64(len(ck.StringValue))
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck.StringValue)))
case *exprpb.Constant_BytesValue:
v = uint64(len(ck.BytesValue))
case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
@@ -340,6 +343,11 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
sel := e.GetSelectExpr()
var sum CostEstimate
if sel.GetTestOnly() {
+ // recurse, but do not add any cost
+ // this is equivalent to how evalTestOnly increments the runtime cost counter
+ // but does not add any additional cost for the qualifier, except here we do
+ // the reverse (ident adds cost)
+ sum = sum.Add(c.cost(sel.GetOperand()))
return sum
}
sum = sum.Add(c.cost(sel.GetOperand()))
@@ -503,7 +511,10 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
}
switch overloadID {
// O(n) functions
- case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
+ case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
+ if overloadID == overloads.ExtFormatString {
+ return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
if len(args) == 1 {
return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/decls.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/decls.go
index 88a99282d9ab..0d91bef51451 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/decls.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -16,9 +16,9 @@
package decls
import (
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
emptypb "google.golang.org/protobuf/types/known/emptypb"
structpb "google.golang.org/protobuf/types/known/structpb"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
@@ -64,6 +64,12 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
ParameterTypes: paramTypes}}}
}
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+ return NewAbstractType("optional", paramType)
+}
+
// NewFunctionType creates a function invocation contract, typically only used
// by type-checking steps after overload resolution.
func NewFunctionType(resultType *exprpb.Type,
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/printer.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/printer.go
index e2ed35be8342..0cecc5210dde 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/printer.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/printer.go
@@ -26,7 +26,7 @@ type semanticAdorner struct {
var _ debug.Adorner = &semanticAdorner{}
-func (a *semanticAdorner) GetMetadata(elem interface{}) string {
+func (a *semanticAdorner) GetMetadata(elem any) string {
result := ""
e, isExpr := elem.(*exprpb.Expr)
if !isExpr {
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/standard.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/standard.go
index 5b48a9046a0b..e64337ba44ae 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/standard.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/standard.go
@@ -287,6 +287,8 @@ func init() {
decls.NewInstanceOverload(overloads.EndsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.Matches,
+ decls.NewOverload(overloads.Matches,
+ []*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewInstanceOverload(overloads.MatchesString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.StartsWith,
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/types.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/types.go
index 8683797d5bbb..28d21c9d9291 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/types.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/types.go
@@ -90,6 +90,14 @@ func FormatCheckedType(t *exprpb.Type) string {
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
+ case kindAbstract:
+ at := t.GetAbstractType()
+ params := at.GetParameterTypes()
+ paramStrs := make([]string, len(params))
+ for i, p := range params {
+ paramStrs[i] = FormatCheckedType(p)
+ }
+ return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
@@ -110,12 +118,39 @@ func isDyn(t *exprpb.Type) bool {
// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
func isDynOrError(t *exprpb.Type) bool {
- switch kindOf(t) {
- case kindError:
- return true
- default:
- return isDyn(t)
+ return isError(t) || isDyn(t)
+}
+
+func isError(t *exprpb.Type) bool {
+ return kindOf(t) == kindError
+}
+
+func isOptional(t *exprpb.Type) bool {
+ if kindOf(t) == kindAbstract {
+ at := t.GetAbstractType()
+ return at.GetName() == "optional"
+ }
+ return false
+}
+
+func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) {
+ if isOptional(t) {
+ at := t.GetAbstractType()
+ return at.GetParameterTypes()[0], true
+ }
+ return t, false
+}
+
+func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
+ return literal.GetStringValue(), true
+ }
}
+ return "", false
}
// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
@@ -236,7 +271,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
// substitution for t1, and whether t2 has a type substitution in mapping m.
//
// The type t2 is a valid substitution for t1 if any of the following statements is true
-// - t2 has a type substitition (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) equal to t1
// - t2 has a type substitution (t2sub) assignable to t1
// - t2 does not occur within t1.
func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) {
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/debug.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/debug.go
index bec885424b11..5dab156ef36c 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/debug.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -29,7 +29,7 @@ import (
// representation of an expression.
type Adorner interface {
// GetMetadata for the input context.
- GetMetadata(ctx interface{}) string
+ GetMetadata(ctx any) string
}
// Writer manages writing expressions to an internal string.
@@ -46,7 +46,7 @@ type emptyDebugAdorner struct {
var emptyAdorner Adorner = &emptyDebugAdorner{}
-func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
+func (a *emptyDebugAdorner) GetMetadata(e any) string {
return ""
}
@@ -170,6 +170,9 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
w.append(",")
w.appendLine()
}
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
w.append(entry.GetFieldKey())
w.append(":")
w.Buffer(entry.GetValue())
@@ -191,6 +194,9 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
w.append(",")
w.appendLine()
}
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
w.Buffer(entry.GetMapKey())
w.append(":")
w.Buffer(entry.GetValue())
@@ -269,7 +275,7 @@ func (w *debugWriter) append(s string) {
w.buffer.WriteString(s)
}
-func (w *debugWriter) appendFormat(f string, args ...interface{}) {
+func (w *debugWriter) appendFormat(f string, args ...any) {
w.append(fmt.Sprintf(f, args...))
}
@@ -280,7 +286,7 @@ func (w *debugWriter) doIndent() {
}
}
-func (w *debugWriter) adorn(e interface{}) {
+func (w *debugWriter) adorn(e any) {
w.append(w.adorner.GetMetadata(e))
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/errors.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/errors.go
index daebba8609a3..1565085ab9a5 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/errors.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/errors.go
@@ -38,7 +38,7 @@ func NewErrors(source Source) *Errors {
}
// ReportError records an error at a source location.
-func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
+func (e *Errors) ReportError(l Location, format string, args ...any) {
e.numErrors++
if e.numErrors > e.maxErrorsToReport {
return
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/operators/operators.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/operators/operators.go
index fa25dfb7f077..f9b39bda3fe0 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/operators/operators.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -37,6 +37,8 @@ const (
Modulo = "_%_"
Negate = "-_"
Index = "_[_]"
+ OptIndex = "_[?_]"
+ OptSelect = "_?._"
// Macros, must have a valid identifier.
Has = "has"
@@ -99,6 +101,8 @@ var (
LogicalNot: {displayName: "!", precedence: 2, arity: 1},
Negate: {displayName: "-", precedence: 2, arity: 1},
Index: {displayName: "", precedence: 1, arity: 2},
+ OptIndex: {displayName: "", precedence: 1, arity: 2},
+ OptSelect: {displayName: "", precedence: 1, arity: 2},
}
)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/overloads/overloads.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/overloads/overloads.go
index 9ebaf6fabf78..9d50f4367bf2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/overloads/overloads.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -148,6 +148,11 @@ const (
StartsWith = "startsWith"
)
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtQuoteString = "strings_quote"
+)
+
// String function overload names.
const (
ContainsString = "contains_string"
@@ -156,6 +161,11 @@ const (
StartsWithString = "starts_with_string"
)
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtFormatString = "string_format"
+)
+
// Time-based functions.
const (
TimeGetFullYear = "getFullYear"
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel
index 5f1b1cd1fd23..f56700de5d62 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -22,6 +22,7 @@ go_library(
"map.go",
"null.go",
"object.go",
+ "optional.go",
"overflow.go",
"provider.go",
"string.go",
@@ -40,8 +41,6 @@ go_library(
"@com_github_stoewer_go_strcase//:go_default_library",
"@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
"@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
@@ -68,6 +67,7 @@ go_test(
"map_test.go",
"null_test.go",
"object_test.go",
+ "optional_test.go",
"provider_test.go",
"string_test.go",
"timestamp_test.go",
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bool.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bool.go
index 1b55ba9529e6..a634ecc2879e 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bool.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bool.go
@@ -62,7 +62,7 @@ func (b Bool) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Bool:
return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -114,6 +114,11 @@ func (b Bool) Equal(other ref.Val) ref.Val {
return Bool(ok && b == otherBool)
}
+// IsZeroValue returns true if the boolean value is false.
+func (b Bool) IsZeroValue() bool {
+ return b == False
+}
+
// Negate implements the traits.Negater interface method.
func (b Bool) Negate() ref.Val {
return !b
@@ -125,7 +130,7 @@ func (b Bool) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (b Bool) Value() interface{} {
+func (b Bool) Value() any {
return bool(b)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bytes.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bytes.go
index 3575717ec716..bef190759fd8 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bytes.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -63,7 +63,7 @@ func (b Bytes) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Array, reflect.Slice:
return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -116,6 +116,11 @@ func (b Bytes) Equal(other ref.Val) ref.Val {
return Bool(ok && bytes.Equal(b, otherBytes))
}
+// IsZeroValue returns true if the byte array is empty.
+func (b Bytes) IsZeroValue() bool {
+ return len(b) == 0
+}
+
// Size implements the traits.Sizer interface method.
func (b Bytes) Size() ref.Val {
return Int(len(b))
@@ -127,6 +132,6 @@ func (b Bytes) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (b Bytes) Value() interface{} {
+func (b Bytes) Value() any {
return []byte(b)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/double.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/double.go
index a6ec52a0f9e2..bda9f31a6bf0 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/double.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/double.go
@@ -78,7 +78,7 @@ func (d Double) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Float32:
v := float32(d)
@@ -134,13 +134,13 @@ func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
case IntType:
i, err := doubleToInt64Checked(float64(d))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(i)
case UintType:
i, err := doubleToUint64Checked(float64(d))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(i)
case DoubleType:
@@ -182,6 +182,11 @@ func (d Double) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if double value is 0.0
+func (d Double) IsZeroValue() bool {
+ return float64(d) == 0.0
+}
+
// Multiply implements traits.Multiplier.Multiply.
func (d Double) Multiply(other ref.Val) ref.Val {
otherDouble, ok := other.(Double)
@@ -211,6 +216,6 @@ func (d Double) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (d Double) Value() interface{} {
+func (d Double) Value() any {
return float64(d)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/duration.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/duration.go
index 418349fa6cc9..c90ac1bee957 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/duration.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/duration.go
@@ -57,14 +57,14 @@ func (d Duration) Add(other ref.Val) ref.Val {
dur2 := other.(Duration)
val, err := addDurationChecked(d.Duration, dur2.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
case TimestampType:
ts := other.(Timestamp).Time
val, err := addTimeDurationChecked(ts, d.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return timestampOf(val)
}
@@ -90,7 +90,7 @@ func (d Duration) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the duration is already assignable to the desired type return it.
if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
return d.Duration, nil
@@ -138,11 +138,16 @@ func (d Duration) Equal(other ref.Val) ref.Val {
return Bool(ok && d.Duration == otherDur.Duration)
}
+// IsZeroValue returns true if the duration value is zero
+func (d Duration) IsZeroValue() bool {
+ return d.Duration == 0
+}
+
// Negate implements traits.Negater.Negate.
func (d Duration) Negate() ref.Val {
val, err := negateDurationChecked(d.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -165,7 +170,7 @@ func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -176,7 +181,7 @@ func (d Duration) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (d Duration) Value() interface{} {
+func (d Duration) Value() any {
return d.Duration
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/err.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/err.go
index 93d79cdcbc51..b4874d9d4d11 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/err.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/err.go
@@ -22,6 +22,12 @@ import (
"github.com/google/cel-go/common/types/ref"
)
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
// Err type which extends the built-in go error and implements ref.Val.
type Err struct {
error
@@ -51,7 +57,7 @@ var (
// NewErr creates a new Err described by the format string and args.
// TODO: Audit the use of this function and standardize the error messages and codes.
-func NewErr(format string, args ...interface{}) ref.Val {
+func NewErr(format string, args ...any) ref.Val {
return &Err{fmt.Errorf(format, args...)}
}
@@ -62,7 +68,7 @@ func NoSuchOverloadErr() ref.Val {
// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
// message that indicates that the native value could not be converted to a CEL ref.Val.
-func UnsupportedRefValConversionErr(val interface{}) ref.Val {
+func UnsupportedRefValConversionErr(val any) ref.Val {
return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
}
@@ -74,20 +80,20 @@ func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
// ValOrErr either returns the existing error or creates a new one.
// TODO: Audit the use of this function and standardize the error messages and codes.
-func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
if val == nil || !IsUnknownOrError(val) {
return NewErr(format, args...)
}
return val
}
-// wrapErr wraps an existing Go error value into a CEL Err value.
-func wrapErr(err error) ref.Val {
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
return &Err{error: err}
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, e.error
}
@@ -114,10 +120,15 @@ func (e *Err) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (e *Err) Value() interface{} {
+func (e *Err) Value() any {
return e.error
}
+// Is implements errors.Is.
+func (e *Err) Is(target error) bool {
+ return e.error.Error() == target.Error()
+}
+
// IsError returns whether the input element ref.Type or ref.Val is equal to
// the ErrType singleton.
func IsError(val ref.Val) bool {
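With WrapErr now exported and *Err implementing Is, CEL error values interoperate with the standard errors package. A hedged sketch; the sentinel error is illustrative:

package main

import (
	"errors"
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	sentinel := errors.New("quota exceeded")
	val := types.WrapErr(sentinel) // ref.Val carrying the original Go error
	if e, ok := val.(error); ok {
		// (*types.Err).Is compares error messages, so errors.Is matches the sentinel.
		fmt.Println(errors.Is(e, sentinel)) // true
	}
}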
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/int.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/int.go
index 95f25dcd8086..f5a9511c8d1c 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/int.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/int.go
@@ -66,7 +66,7 @@ func (i Int) Add(other ref.Val) ref.Val {
}
val, err := addInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -89,7 +89,7 @@ func (i Int) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Int, reflect.Int32:
// Enums are also mapped as int32 derivations.
@@ -176,7 +176,7 @@ func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
case UintType:
u, err := int64ToUint64Checked(int64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(u)
case DoubleType:
@@ -204,7 +204,7 @@ func (i Int) Divide(other ref.Val) ref.Val {
}
val, err := divideInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -226,6 +226,11 @@ func (i Int) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if integer is equal to 0
+func (i Int) IsZeroValue() bool {
+ return i == IntZero
+}
+
// Modulo implements traits.Modder.Modulo.
func (i Int) Modulo(other ref.Val) ref.Val {
otherInt, ok := other.(Int)
@@ -234,7 +239,7 @@ func (i Int) Modulo(other ref.Val) ref.Val {
}
val, err := moduloInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -247,7 +252,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
}
val, err := multiplyInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -256,7 +261,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
func (i Int) Negate() ref.Val {
val, err := negateInt64Checked(int64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -269,7 +274,7 @@ func (i Int) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractInt64Checked(int64(i), int64(subtraInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -280,7 +285,7 @@ func (i Int) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (i Int) Value() interface{} {
+func (i Int) Value() any {
return int64(i)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/iterator.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/iterator.go
index 4906627783d5..9f224ad4ffa3 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/iterator.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -34,7 +34,7 @@ var (
// interpreter.
type baseIterator struct{}
-func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, fmt.Errorf("type conversion on iterators not supported")
}
@@ -50,6 +50,6 @@ func (*baseIterator) Type() ref.Type {
return IteratorType
}
-func (*baseIterator) Value() interface{} {
+func (*baseIterator) Value() any {
return nil
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/json_value.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/json_value.go
index cd63b519441b..13a4efe7ada7 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/json_value.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -25,4 +25,5 @@ var (
jsonValueType = reflect.TypeOf(&structpb.Value{})
jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/list.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/list.go
index 7230f7ea1218..de5f2099bf79 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/list.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/list.go
@@ -17,12 +17,14 @@ package types
import (
"fmt"
"reflect"
+ "strings"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -40,13 +42,13 @@ var (
// NewDynamicList returns a traits.Lister with heterogenous elements.
// value should be an array of "native" types, i.e. any type that
// NativeToValue() can convert to a ref.Val.
-func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister {
+func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister {
refValue := reflect.ValueOf(value)
return &baseList{
TypeAdapter: adapter,
value: value,
size: refValue.Len(),
- get: func(i int) interface{} {
+ get: func(i int) any {
return refValue.Index(i).Interface()
},
}
@@ -58,7 +60,7 @@ func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister {
TypeAdapter: adapter,
value: elems,
size: len(elems),
- get: func(i int) interface{} { return elems[i] },
+ get: func(i int) any { return elems[i] },
}
}
@@ -70,7 +72,7 @@ func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister {
TypeAdapter: adapter,
value: elems,
size: len(elems),
- get: func(i int) interface{} { return elems[i] },
+ get: func(i int) any { return elems[i] },
}
}
@@ -80,7 +82,7 @@ func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister
TypeAdapter: adapter,
value: list,
size: list.Len(),
- get: func(i int) interface{} { return list.Get(i).Interface() },
+ get: func(i int) any { return list.Get(i).Interface() },
}
}
@@ -91,22 +93,25 @@ func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister {
TypeAdapter: adapter,
value: l,
size: len(vals),
- get: func(i int) interface{} { return vals[i] },
+ get: func(i int) any { return vals[i] },
}
}
// NewMutableList creates a new mutable list whose internal state can be modified.
func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
var mutableValues []ref.Val
- return &mutableList{
+ l := &mutableList{
baseList: &baseList{
TypeAdapter: adapter,
value: mutableValues,
size: 0,
- get: func(i int) interface{} { return mutableValues[i] },
},
mutableValues: mutableValues,
}
+ l.get = func(i int) any {
+ return l.mutableValues[i]
+ }
+ return l
}
// baseList points to a list containing elements of any type.
@@ -114,7 +119,7 @@ func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type baseList struct {
ref.TypeAdapter
- value interface{}
+ value any
// size indicates the number of elements within the list.
// Since objects are immutable the size of a list is static.
@@ -122,7 +127,7 @@ type baseList struct {
// get returns a value at the specified integer index.
// The index is guaranteed to be checked against the list index range.
- get func(int) interface{}
+ get func(int) any
}
// Add implements the traits.Adder interface method.
@@ -157,7 +162,7 @@ func (l *baseList) Contains(elem ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the underlying list value is assignable to the reflected type return it.
if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
return l.value, nil
@@ -240,7 +245,7 @@ func (l *baseList) Equal(other ref.Val) ref.Val {
// Get implements the traits.Indexer interface method.
func (l *baseList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
+ ind, err := IndexOrError(index)
if err != nil {
return ValOrErr(index, err.Error())
}
@@ -250,6 +255,11 @@ func (l *baseList) Get(index ref.Val) ref.Val {
return l.NativeToValue(l.get(ind))
}
+// IsZeroValue returns true if the list is empty.
+func (l *baseList) IsZeroValue() bool {
+ return l.size == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *baseList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -266,10 +276,24 @@ func (l *baseList) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (l *baseList) Value() interface{} {
+func (l *baseList) Value() any {
return l.value
}
+// String converts the list to a human readable string form.
+func (l *baseList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := 0; i < l.size; i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.get(i)))
+ if i != l.size-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
type mutableList struct {
*baseList
@@ -305,7 +329,7 @@ func (l *mutableList) ToImmutableList() traits.Lister {
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type concatList struct {
ref.TypeAdapter
- value interface{}
+ value any
prevList traits.Lister
nextList traits.Lister
}
@@ -351,8 +375,8 @@ func (l *concatList) Contains(elem ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- combined := NewDynamicList(l.TypeAdapter, l.Value().([]interface{}))
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ combined := NewDynamicList(l.TypeAdapter, l.Value().([]any))
return combined.ConvertToNative(typeDesc)
}
@@ -396,7 +420,7 @@ func (l *concatList) Equal(other ref.Val) ref.Val {
// Get implements the traits.Indexer interface method.
func (l *concatList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
+ ind, err := IndexOrError(index)
if err != nil {
return ValOrErr(index, err.Error())
}
@@ -408,6 +432,11 @@ func (l *concatList) Get(index ref.Val) ref.Val {
return l.nextList.Get(offset)
}
+// IsZeroValue returns true if the list is empty.
+func (l *concatList) IsZeroValue() bool {
+ return l.Size().(Int) == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *concatList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -418,15 +447,29 @@ func (l *concatList) Size() ref.Val {
return l.prevList.Size().(Int).Add(l.nextList.Size())
}
+// String converts the concatenated list to a human-readable string.
+func (l *concatList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := Int(0); i < l.Size().(Int); i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.Get(i)))
+ if i != l.Size().(Int)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
// Type implements the ref.Val interface method.
func (l *concatList) Type() ref.Type {
return ListType
}
// Value implements the ref.Val interface method.
-func (l *concatList) Value() interface{} {
+func (l *concatList) Value() any {
if l.value == nil {
- merged := make([]interface{}, l.Size().(Int))
+ merged := make([]any, l.Size().(Int))
prevLen := l.prevList.Size().(Int)
for i := Int(0); i < prevLen; i++ {
merged[i] = l.prevList.Get(i).Value()
@@ -469,7 +512,8 @@ func (it *listIterator) Next() ref.Val {
return nil
}
-func indexOrError(index ref.Val) (int, error) {
+// IndexOrError converts an input index value into either a lossless integer index or an error.
+func IndexOrError(index ref.Val) (int, error) {
switch iv := index.(type) {
case Int:
return int(iv), nil
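The list.go changes add a String() renderer and an IsZeroValue check, fix NewMutableList so its get closure observes appended elements, and export indexOrError as IndexOrError. A short sketch of the observable behaviour, again assuming the upstream cel-go import path:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	// NewDynamicList wraps a native Go slice; the new String() method renders it
	// in CEL-style list form.
	l := types.NewDynamicList(types.DefaultTypeAdapter, []int64{1, 2, 3})
	fmt.Println(l) // [1, 2, 3]

	// Get accepts lossless integer indexes; invalid indexes surface through the
	// now-exported IndexOrError helper as CEL error values.
	fmt.Println(l.Get(types.Int(1)))                      // 2
	fmt.Println(types.IsError(l.Get(types.String("x")))) // true
}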
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/map.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/map.go
index 58655940244d..213be4ac9e33 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/map.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/map.go
@@ -17,20 +17,22 @@ package types
import (
"fmt"
"reflect"
+ "strings"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
"github.com/stoewer/go-strcase"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
-func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper {
+func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper {
refValue := reflect.ValueOf(value)
return &baseMap{
TypeAdapter: adapter,
@@ -65,7 +67,7 @@ func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Map
}
// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
-func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]interface{}) traits.Mapper {
+func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits.Mapper {
return &baseMap{
TypeAdapter: adapter,
mapAccessor: newStringIfaceMapAccessor(adapter, value),
@@ -125,7 +127,7 @@ type baseMap struct {
mapAccessor
// value is the native Go value upon which the map type operators.
- value interface{}
+ value any
// size is the number of entries in the map.
size int
@@ -138,7 +140,7 @@ func (m *baseMap) Contains(index ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the map is already assignable to the desired type return it, e.g. interfaces and
// maps with the same key value types.
if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
@@ -275,18 +277,42 @@ func (m *baseMap) Get(key ref.Val) ref.Val {
return v
}
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+ return m.size == 0
+}
+
// Size implements the traits.Sizer interface method.
func (m *baseMap) Size() ref.Val {
return Int(m.size)
}
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ it := m.Iterator()
+ i := 0
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+ if i != m.size-1 {
+ sb.WriteString(", ")
+ }
+ i++
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
// Type implements the ref.Val interface method.
func (m *baseMap) Type() ref.Type {
return MapType
}
// Value implements the ref.Val interface method.
-func (m *baseMap) Value() interface{} {
+func (m *baseMap) Value() any {
return m.value
}
@@ -498,7 +524,7 @@ func (a *stringMapAccessor) Iterator() traits.Iterator {
}
}
-func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interface{}) mapAccessor {
+func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]any) mapAccessor {
return &stringIfaceMapAccessor{
TypeAdapter: adapter,
mapVal: mapVal,
@@ -507,7 +533,7 @@ func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interf
type stringIfaceMapAccessor struct {
ref.TypeAdapter
- mapVal map[string]interface{}
+ mapVal map[string]any
}
// Find uses native map accesses to find the key, returning (value, true) if present.
@@ -556,7 +582,7 @@ func (m *protoMap) Contains(key ref.Val) ref.Val {
// ConvertToNative implements the ref.Val interface method.
//
// Note, assignment to Golang struct types is not yet supported.
-func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the map is already assignable to the desired type return it, e.g. interfaces and
// maps with the same key value types.
switch typeDesc {
@@ -601,9 +627,9 @@ func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
ntvKey := key.Interface()
ntvVal := val.Interface()
- switch ntvVal.(type) {
+ switch pv := ntvVal.(type) {
case protoreflect.Message:
- ntvVal = ntvVal.(protoreflect.Message).Interface()
+ ntvVal = pv.Interface()
}
if keyType == otherKeyType && valType == otherValType {
mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
@@ -732,6 +758,11 @@ func (m *protoMap) Get(key ref.Val) ref.Val {
return v
}
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+ return m.value.Len() == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (m *protoMap) Iterator() traits.Iterator {
// Copy the keys to make their order stable.
@@ -758,7 +789,7 @@ func (m *protoMap) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (m *protoMap) Value() interface{} {
+func (m *protoMap) Value() any {
return m.value
}
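map.go receives the same treatment: the constructors move to map[string]any, and baseMap gains String() and IsZeroValue. A hedged usage sketch under the same upstream-import assumption:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	// NewStringInterfaceMap now takes map[string]any; the result implements
	// fmt.Stringer and prints entries in "{key: value}" form.
	m := types.NewStringInterfaceMap(types.DefaultTypeAdapter, map[string]any{"replicas": 3})
	fmt.Println(m) // {replicas: 3}

	// Lookups behave as before and still go through the type adapter.
	fmt.Println(m.Contains(types.String("replicas"))) // true
}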
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/null.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/null.go
index 3d3503c275e6..38927a112cd6 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/null.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/null.go
@@ -18,9 +18,10 @@ import (
"fmt"
"reflect"
- "github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/proto"
+ "github.com/google/cel-go/common/types/ref"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -34,14 +35,20 @@ var (
// NullValue singleton.
NullValue = Null(structpb.NullValue_NULL_VALUE)
- jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+ // golang reflect type for Null values.
+ nullReflectType = reflect.TypeOf(NullValue)
)
// ConvertToNative implements ref.Val.ConvertToNative.
-func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Int32:
- return reflect.ValueOf(n).Convert(typeDesc).Interface(), nil
+ switch typeDesc {
+ case jsonNullType:
+ return structpb.NullValue_NULL_VALUE, nil
+ case nullReflectType:
+ return n, nil
+ }
case reflect.Ptr:
switch typeDesc {
case anyValueType:
@@ -54,6 +61,10 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
return anypb.New(pb.(proto.Message))
case jsonValueType:
return structpb.NewNullValue(), nil
+ case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
+ int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
+ uint64WrapperType:
+ return nil, nil
}
case reflect.Interface:
nv := n.Value()
@@ -86,12 +97,17 @@ func (n Null) Equal(other ref.Val) ref.Val {
return Bool(NullType == other.Type())
}
+// IsZeroValue returns true as null always represents an absent value.
+func (n Null) IsZeroValue() bool {
+ return true
+}
+
// Type implements ref.Val.Type.
func (n Null) Type() ref.Type {
return NullType
}
// Value implements ref.Val.Value.
-func (n Null) Value() interface{} {
+func (n Null) Value() any {
return structpb.NullValue_NULL_VALUE
}
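With jsonNullType moved into json_value.go and a dedicated nullReflectType added, Null.ConvertToNative now distinguishes the protobuf NullValue enum from the CEL Null type instead of converting any int32-kinded target, and conversions to protobuf wrapper types yield a nil message. A small sketch of the two cases visible in this hunk (upstream import paths assumed):

package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/common/types"
	structpb "google.golang.org/protobuf/types/known/structpb"
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Converting to the protobuf NullValue enum returns the enum constant.
	v, err := types.NullValue.ConvertToNative(reflect.TypeOf(structpb.NullValue_NULL_VALUE))
	fmt.Println(v, err) // NULL_VALUE <nil>

	// Converting to a protobuf wrapper type now returns a nil message rather
	// than an error.
	w, err := types.NullValue.ConvertToNative(reflect.TypeOf(&wrapperspb.Int64Value{}))
	fmt.Println(w, err) // <nil> <nil>
}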
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/object.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/object.go
index 5faf8551104b..9955e2dce59b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/object.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/object.go
@@ -18,11 +18,12 @@ import (
"fmt"
"reflect"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -52,7 +53,7 @@ func NewObject(adapter ref.TypeAdapter,
typeValue: typeValue}
}
-func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
srcPB := o.value
if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
return srcPB, nil
@@ -133,6 +134,11 @@ func (o *protoObj) IsSet(field ref.Val) ref.Val {
return False
}
+// IsZeroValue returns true if the protobuf object is empty.
+func (o *protoObj) IsZeroValue() bool {
+ return proto.Equal(o.value, o.typeDesc.Zero())
+}
+
func (o *protoObj) Get(index ref.Val) ref.Val {
protoFieldName, ok := index.(String)
if !ok {
@@ -154,6 +160,6 @@ func (o *protoObj) Type() ref.Type {
return o.typeValue
}
-func (o *protoObj) Value() interface{} {
+func (o *protoObj) Value() any {
return o.value
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/optional.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/optional.go
new file mode 100644
index 000000000000..54cb35b1ab97
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ // OptionalType indicates the runtime type of an optional value.
+ OptionalType = NewTypeValue("optional")
+
+ // OptionalNone is a sentinel value which is used to indicate an empty optional value.
+ OptionalNone = &Optional{}
+)
+
+// OptionalOf returns an optional value which wraps a concrete CEL value.
+func OptionalOf(value ref.Val) *Optional {
+ return &Optional{value: value}
+}
+
+// Optional value which points to a value if non-empty.
+type Optional struct {
+ value ref.Val
+}
+
+// HasValue returns true if the optional has a value.
+func (o *Optional) HasValue() bool {
+ return o.value != nil
+}
+
+// GetValue returns the wrapped value contained in the optional.
+func (o *Optional) GetValue() ref.Val {
+ if !o.HasValue() {
+ return NewErr("optional.none() dereference")
+ }
+ return o.value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if !o.HasValue() {
+ return nil, errors.New("optional.none() dereference")
+ }
+ return o.value.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case OptionalType:
+ return o
+ case TypeType:
+ return OptionalType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal)
+}
+
+// Equal determines whether the values contained by two optional values are equal.
+func (o *Optional) Equal(other ref.Val) ref.Val {
+ otherOpt, isOpt := other.(*Optional)
+ if !isOpt {
+ return False
+ }
+ if !o.HasValue() {
+ return Bool(!otherOpt.HasValue())
+ }
+ if !otherOpt.HasValue() {
+ return False
+ }
+ return o.value.Equal(otherOpt.value)
+}
+
+func (o *Optional) String() string {
+ if o.HasValue() {
+ return fmt.Sprintf("optional(%v)", o.GetValue())
+ }
+ return "optional.none()"
+}
+
+// Type implements the ref.Val interface method.
+func (o *Optional) Type() ref.Type {
+ return OptionalType
+}
+
+// Value returns the underlying 'Value()' of the wrapped value, if present.
+func (o *Optional) Value() any {
+ if o.value == nil {
+ return nil
+ }
+ return o.value.Value()
+}
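The new optional.go file introduces the Optional runtime type used by CEL's optional-value extensions. A brief sketch of the API it exposes, assuming the upstream cel-go import path:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	// OptionalOf wraps a concrete CEL value; OptionalNone is the shared empty sentinel.
	some := types.OptionalOf(types.String("on-demand"))
	none := types.OptionalNone

	fmt.Println(some.HasValue(), some) // true optional(on-demand)
	fmt.Println(none.HasValue(), none) // false optional.none()

	// Dereferencing an empty optional yields a CEL error value rather than panicking.
	fmt.Println(types.IsError(none.GetValue())) // true
}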
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/enum.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/enum.go
index 4a26b5c7c32c..09a15463086e 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/enum.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -18,9 +18,9 @@ import (
"google.golang.org/protobuf/reflect/protoreflect"
)
-// NewEnumValueDescription produces an enum value description with the fully qualified enum value
+// newEnumValueDescription produces an enum value description with the fully qualified enum value
// name and the enum value descriptor.
-func NewEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
+func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
return &EnumValueDescription{
enumValueName: name,
desc: desc,
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/file.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/file.go
index 0bcade75f9a5..e323afb1df30 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/file.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -18,32 +18,66 @@ import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
+
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
)
-// NewFileDescription returns a FileDescription instance with a complete listing of all the message
-// types and enum values declared within any scope in the file.
-func NewFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) *FileDescription {
+// newFileDescription returns a FileDescription instance with a complete listing of all the message
+// types and enum values, as well as a map of extensions declared within any scope in the file.
+func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) {
metadata := collectFileMetadata(fileDesc)
enums := make(map[string]*EnumValueDescription)
for name, enumVal := range metadata.enumValues {
- enums[name] = NewEnumValueDescription(name, enumVal)
+ enums[name] = newEnumValueDescription(name, enumVal)
}
types := make(map[string]*TypeDescription)
for name, msgType := range metadata.msgTypes {
- types[name] = NewTypeDescription(name, msgType)
+ types[name] = newTypeDescription(name, msgType, pbdb.extensions)
+ }
+ fileExtMap := make(extensionMap)
+ for typeName, extensions := range metadata.msgExtensionMap {
+ messageExtMap, found := fileExtMap[typeName]
+ if !found {
+ messageExtMap = make(map[string]*FieldDescription)
+ }
+ for _, ext := range extensions {
+ extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor()
+ messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc)
+ }
+ fileExtMap[typeName] = messageExtMap
}
return &FileDescription{
+ name: fileDesc.Path(),
types: types,
enums: enums,
- }
+ }, fileExtMap
}
// FileDescription holds a map of all types and enum values declared within a proto file.
type FileDescription struct {
+ name string
types map[string]*TypeDescription
enums map[string]*EnumValueDescription
}
+// Copy creates a copy of the FileDescription with updated Db references within its types.
+func (fd *FileDescription) Copy(pbdb *Db) *FileDescription {
+ typesCopy := make(map[string]*TypeDescription, len(fd.types))
+ for k, v := range fd.types {
+ typesCopy[k] = v.Copy(pbdb)
+ }
+ return &FileDescription{
+ name: fd.name,
+ types: typesCopy,
+ enums: fd.enums,
+ }
+}
+
+// GetName returns the fully qualified file path for the file.
+func (fd *FileDescription) GetName() string {
+ return fd.name
+}
+
// GetEnumDescription returns an EnumDescription for a qualified enum value
// name declared within the .proto file.
func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
@@ -94,6 +128,10 @@ type fileMetadata struct {
msgTypes map[string]protoreflect.MessageDescriptor
// enumValues maps from fully-qualified enum value to enum value descriptor.
enumValues map[string]protoreflect.EnumValueDescriptor
+ // msgExtensionMap maps from the protobuf message name being extended to a set of extensions
+ // for the type.
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor
+
// TODO: support enum type definitions for use in future type-check enhancements.
}
@@ -102,28 +140,38 @@ type fileMetadata struct {
func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
msgTypes := make(map[string]protoreflect.MessageDescriptor)
enumValues := make(map[string]protoreflect.EnumValueDescriptor)
- collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues)
+ msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor)
+ collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap)
collectEnumValues(fileDesc.Enums(), enumValues)
+ collectExtensions(fileDesc.Extensions(), msgExtensionMap)
return &fileMetadata{
- msgTypes: msgTypes,
- enumValues: enumValues,
+ msgTypes: msgTypes,
+ enumValues: enumValues,
+ msgExtensionMap: msgExtensionMap,
}
}
// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
// fully qualified protobuf names to descriptors.
-func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, msgTypeMap map[string]protoreflect.MessageDescriptor, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+func collectMsgTypes(msgTypes protoreflect.MessageDescriptors,
+ msgTypeMap map[string]protoreflect.MessageDescriptor,
+ enumValueMap map[string]protoreflect.EnumValueDescriptor,
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
for i := 0; i < msgTypes.Len(); i++ {
msgType := msgTypes.Get(i)
msgTypeMap[string(msgType.FullName())] = msgType
nestedMsgTypes := msgType.Messages()
if nestedMsgTypes.Len() != 0 {
- collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap)
+ collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap)
}
nestedEnumTypes := msgType.Enums()
if nestedEnumTypes.Len() != 0 {
collectEnumValues(nestedEnumTypes, enumValueMap)
}
+ nestedExtensions := msgType.Extensions()
+ if nestedExtensions.Len() != 0 {
+ collectExtensions(nestedExtensions, msgExtensionMap)
+ }
}
}
@@ -139,3 +187,16 @@ func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[
}
}
}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < extensions.Len(); i++ {
+ ext := extensions.Get(i)
+ extendsMsg := string(ext.ContainingMessage().FullName())
+ msgExts, found := msgExtensionMap[extendsMsg]
+ if !found {
+ msgExts = []protoreflect.ExtensionDescriptor{}
+ }
+ msgExts = append(msgExts, ext)
+ msgExtensionMap[extendsMsg] = msgExts
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/pb.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/pb.go
index 457b47ceeeeb..eadebcb04e8b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/pb.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -40,13 +40,19 @@ type Db struct {
revFileDescriptorMap map[string]*FileDescription
// files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
files []*FileDescription
+ // extensions contains the mapping between a given type name, extension name and its FieldDescription
+ extensions map[string]map[string]*FieldDescription
}
+// extensionMap is a type alias to a map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
var (
// DefaultDb used at evaluation time or unless overridden at check time.
DefaultDb = &Db{
revFileDescriptorMap: make(map[string]*FileDescription),
files: []*FileDescription{},
+ extensions: make(extensionMap),
}
)
@@ -80,6 +86,7 @@ func NewDb() *Db {
pbdb := &Db{
revFileDescriptorMap: make(map[string]*FileDescription),
files: []*FileDescription{},
+ extensions: make(extensionMap),
}
// The FileDescription objects in the default db contain lazily initialized TypeDescription
// values which may point to the state contained in the DefaultDb irrespective of this shallow
@@ -96,19 +103,34 @@ func NewDb() *Db {
// Copy creates a copy of the current database with its own internal descriptor mapping.
func (pbdb *Db) Copy() *Db {
copy := NewDb()
- for k, v := range pbdb.revFileDescriptorMap {
- copy.revFileDescriptorMap[k] = v
- }
- for _, f := range pbdb.files {
+ for _, fd := range pbdb.files {
hasFile := false
- for _, f2 := range copy.files {
- if f2 == f {
+ for _, fd2 := range copy.files {
+ if fd2 == fd {
hasFile = true
}
}
if !hasFile {
- copy.files = append(copy.files, f)
+ fd = fd.Copy(copy)
+ copy.files = append(copy.files, fd)
+ }
+ for _, enumValName := range fd.GetEnumNames() {
+ copy.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ copy.revFileDescriptorMap[msgTypeName] = fd
+ }
+ copy.revFileDescriptorMap[fd.GetName()] = fd
+ }
+ for typeName, extFieldMap := range pbdb.extensions {
+ copyExtFieldMap, found := copy.extensions[typeName]
+ if !found {
+ copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
}
+ for extFieldName, fd := range extFieldMap {
+ copyExtFieldMap[extFieldName] = fd
+ }
+ copy.extensions[typeName] = copyExtFieldMap
}
return copy
}
@@ -137,17 +159,30 @@ func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileD
if err == nil {
fileDesc = globalFD
}
- fd = NewFileDescription(fileDesc, pbdb)
+ var fileExtMap extensionMap
+ fd, fileExtMap = newFileDescription(fileDesc, pbdb)
for _, enumValName := range fd.GetEnumNames() {
pbdb.revFileDescriptorMap[enumValName] = fd
}
for _, msgTypeName := range fd.GetTypeNames() {
pbdb.revFileDescriptorMap[msgTypeName] = fd
}
- pbdb.revFileDescriptorMap[fileDesc.Path()] = fd
+ pbdb.revFileDescriptorMap[fd.GetName()] = fd
// Return the specific file descriptor registered.
pbdb.files = append(pbdb.files, fd)
+
+ // Index the protobuf message extensions from the file into the pbdb
+ for typeName, extMap := range fileExtMap {
+ typeExtMap, found := pbdb.extensions[typeName]
+ if !found {
+ pbdb.extensions[typeName] = extMap
+ continue
+ }
+ for extName, field := range extMap {
+ typeExtMap[extName] = field
+ }
+ }
return fd, nil
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/type.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/type.go
index 912076fa4882..df9532156a06 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/type.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -38,22 +38,23 @@ type description interface {
Zero() proto.Message
}
-// NewTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
// with a given descriptor.
-func NewTypeDescription(typeName string, desc protoreflect.MessageDescriptor) *TypeDescription {
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription {
msgType := dynamicpb.NewMessageType(desc)
msgZero := dynamicpb.NewMessage(desc)
fieldMap := map[string]*FieldDescription{}
fields := desc.Fields()
for i := 0; i < fields.Len(); i++ {
f := fields.Get(i)
- fieldMap[string(f.Name())] = NewFieldDescription(f)
+ fieldMap[string(f.Name())] = newFieldDescription(f)
}
return &TypeDescription{
typeName: typeName,
desc: desc,
msgType: msgType,
fieldMap: fieldMap,
+ extensions: extensions,
reflectType: reflectTypeOf(msgZero),
zeroMsg: zeroValueOf(msgZero),
}
@@ -66,10 +67,24 @@ type TypeDescription struct {
desc protoreflect.MessageDescriptor
msgType protoreflect.MessageType
fieldMap map[string]*FieldDescription
+ extensions extensionMap
reflectType reflect.Type
zeroMsg proto.Message
}
+// Copy copies the type description with updated references to the Db.
+func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription {
+ return &TypeDescription{
+ typeName: td.typeName,
+ desc: td.desc,
+ msgType: td.msgType,
+ fieldMap: td.fieldMap,
+ extensions: pbdb.extensions,
+ reflectType: td.reflectType,
+ zeroMsg: td.zeroMsg,
+ }
+}
+
// FieldMap returns a string field name to FieldDescription map.
func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
return td.fieldMap
@@ -78,16 +93,21 @@ func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
fd, found := td.fieldMap[name]
+ if found {
+ return fd, true
+ }
+ extFieldMap, found := td.extensions[td.typeName]
if !found {
return nil, false
}
- return fd, true
+ fd, found = extFieldMap[name]
+ return fd, found
}
// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
//
// This method returns the unwrapped value and 'true', else the original value and 'false'.
-func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (interface{}, bool, error) {
+func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) {
return unwrap(td, msg)
}
@@ -111,8 +131,8 @@ func (td *TypeDescription) Zero() proto.Message {
return td.zeroMsg
}
-// NewFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
-func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
+// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
var reflectType reflect.Type
var zeroMsg proto.Message
switch fieldDesc.Kind() {
@@ -124,9 +144,17 @@ func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescripti
default:
reflectType = reflectTypeOf(fieldDesc.Default().Interface())
if fieldDesc.IsList() {
- parentMsg := dynamicpb.NewMessage(fieldDesc.ContainingMessage())
- listField := parentMsg.NewField(fieldDesc).List()
- elem := listField.NewElement().Interface()
+ var elemValue protoreflect.Value
+ if fieldDesc.IsExtension() {
+ et := dynamicpb.NewExtensionType(fieldDesc)
+ elemValue = et.New().List().NewElement()
+ } else {
+ parentMsgType := fieldDesc.ContainingMessage()
+ parentMsg := dynamicpb.NewMessage(parentMsgType)
+ listField := parentMsg.NewField(fieldDesc).List()
+ elemValue = listField.NewElement()
+ }
+ elem := elemValue.Interface()
switch elemType := elem.(type) {
case protoreflect.Message:
elem = elemType.Interface()
@@ -140,8 +168,8 @@ func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescripti
}
var keyType, valType *FieldDescription
if fieldDesc.IsMap() {
- keyType = NewFieldDescription(fieldDesc.MapKey())
- valType = NewFieldDescription(fieldDesc.MapValue())
+ keyType = newFieldDescription(fieldDesc.MapKey())
+ valType = newFieldDescription(fieldDesc.MapValue())
}
return &FieldDescription{
desc: fieldDesc,
@@ -195,7 +223,7 @@ func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
//
// This function implements the FieldType.IsSet function contract which can be used to operate on
// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) IsSet(target interface{}) bool {
+func (fd *FieldDescription) IsSet(target any) bool {
switch v := target.(type) {
case proto.Message:
pbRef := v.ProtoReflect()
@@ -219,14 +247,14 @@ func (fd *FieldDescription) IsSet(target interface{}) bool {
//
// This function implements the FieldType.GetFrom function contract which can be used to operate
// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) GetFrom(target interface{}) (interface{}, error) {
+func (fd *FieldDescription) GetFrom(target any) (any, error) {
v, ok := target.(proto.Message)
if !ok {
return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
}
pbRef := v.ProtoReflect()
pbDesc := pbRef.Descriptor()
- var fieldVal interface{}
+ var fieldVal any
if pbDesc == fd.desc.ContainingMessage() {
// When the target protobuf shares the same message descriptor instance as the field
// descriptor, use the cached field descriptor value.
@@ -289,7 +317,7 @@ func (fd *FieldDescription) IsList() bool {
//
// This function returns the unwrapped value and 'true' on success, or the original value
// and 'false' otherwise.
-func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (interface{}, bool, error) {
+func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) {
return unwrapDynamic(fd, msg)
}
@@ -362,7 +390,7 @@ func checkedWrap(t *exprpb.Type) *exprpb.Type {
// input message is a *dynamicpb.Message which obscures the typing information from Go.
//
// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrap(desc description, msg proto.Message) (interface{}, bool, error) {
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
switch v := msg.(type) {
case *anypb.Any:
dynMsg, err := v.UnmarshalNew()
@@ -418,7 +446,7 @@ func unwrap(desc description, msg proto.Message) (interface{}, bool, error) {
// unwrapDynamic unwraps a reflected protobuf Message value.
//
// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{}, bool, error) {
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
msg := refMsg.Interface()
if !refMsg.IsValid() {
msg = desc.Zero()
@@ -508,7 +536,7 @@ func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{},
// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
// well-known protobuf reflected types expected by the CEL type system.
-func reflectTypeOf(val interface{}) reflect.Type {
+func reflectTypeOf(val any) reflect.Type {
switch v := val.(type) {
case proto.Message:
return reflect.TypeOf(zeroValueOf(v))
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/provider.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/provider.go
index 02087d14e343..e66951f5b248 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/provider.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/provider.go
@@ -19,11 +19,12 @@ import (
"reflect"
"time"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
anypb "google.golang.org/protobuf/types/known/anypb"
@@ -195,7 +196,7 @@ func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error {
// providing support for custom proto-based types.
//
// This method should be the inverse of ref.Val.ConvertToNative.
-func (p *protoTypeRegistry) NativeToValue(value interface{}) ref.Val {
+func (p *protoTypeRegistry) NativeToValue(value any) ref.Val {
if val, found := nativeToValue(p, value); found {
return val
}
@@ -249,7 +250,7 @@ var (
)
// NativeToValue implements the ref.TypeAdapter interface.
-func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
if val, found := nativeToValue(a, value); found {
return val
}
@@ -258,7 +259,7 @@ func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
// nativeToValue returns the converted (ref.Val, true) of a conversion is found,
// otherwise (nil, false)
-func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) {
+func nativeToValue(a ref.TypeAdapter, value any) (ref.Val, bool) {
switch v := value.(type) {
case nil:
return NullValue, true
@@ -364,7 +365,7 @@ func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) {
// specializations for common map types.
case map[string]string:
return NewStringStringMap(a, v), true
- case map[string]interface{}:
+ case map[string]any:
return NewStringInterfaceMap(a, v), true
case map[ref.Val]ref.Val:
return NewRefValMap(a, v), true
@@ -479,9 +480,12 @@ func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val re
if err != nil {
return fieldTypeConversionError(field, err)
}
- switch v.(type) {
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
case proto.Message:
- v = v.(proto.Message).ProtoReflect()
+ v = pv.ProtoReflect()
}
target.Set(field.Descriptor(), protoreflect.ValueOf(v))
return nil
@@ -495,6 +499,9 @@ func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, l
if err != nil {
return fieldTypeConversionError(listField, err)
}
+ if elemVal == nil {
+ continue
+ }
switch ev := elemVal.(type) {
case proto.Message:
elemVal = ev.ProtoReflect()
@@ -519,9 +526,12 @@ func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapV
if err != nil {
return fieldTypeConversionError(mapField, err)
}
- switch v.(type) {
+ if v == nil {
+ continue
+ }
+ switch pv := v.(type) {
case proto.Message:
- v = v.(proto.Message).ProtoReflect()
+ v = pv.ProtoReflect()
}
target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/provider.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/provider.go
index 91a711fa7076..7eabbb9ca388 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/provider.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -39,8 +39,6 @@ type TypeProvider interface {
// FieldFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
- //
- // Used during type-checking only.
FindFieldType(messageType string, fieldName string) (*FieldType, bool)
// NewValue creates a new type value from a qualified name and map of field
@@ -55,7 +53,7 @@ type TypeProvider interface {
// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
type TypeAdapter interface {
// NativeToValue converts the input `value` to a CEL `ref.Val`.
- NativeToValue(value interface{}) Val
+ NativeToValue(value any) Val
}
// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
@@ -97,7 +95,7 @@ type FieldType struct {
}
// FieldTester is used to test field presence on an input object.
-type FieldTester func(target interface{}) bool
+type FieldTester func(target any) bool
// FieldGetter is used to get the field value from an input object, if set.
-type FieldGetter func(target interface{}) (interface{}, error)
+type FieldGetter func(target any) (any, error)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go
index 3098580c9106..5921ffd81f36 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -37,7 +37,7 @@ type Type interface {
type Val interface {
// ConvertToNative converts the Value to a native Go struct according to the
// reflected type description, or error if the conversion is not feasible.
- ConvertToNative(typeDesc reflect.Type) (interface{}, error)
+ ConvertToNative(typeDesc reflect.Type) (any, error)
// ConvertToType supports type conversions between value types supported by the expression language.
ConvertToType(typeValue Type) Val
@@ -50,5 +50,5 @@ type Val interface {
// Value returns the raw value of the instance which may not be directly compatible with the expression
// language types.
- Value() interface{}
+ Value() any
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/string.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/string.go
index b6d665683c5c..a65cc14e4c50 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/string.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/string.go
@@ -72,7 +72,7 @@ func (s String) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (s String) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.String:
if reflect.TypeOf(s).AssignableTo(typeDesc) {
@@ -154,6 +154,11 @@ func (s String) Equal(other ref.Val) ref.Val {
return Bool(ok && s == otherString)
}
+// IsZeroValue returns true if the string is empty.
+func (s String) IsZeroValue() bool {
+ return len(s) == 0
+}
+
// Match implements traits.Matcher.Match.
func (s String) Match(pattern ref.Val) ref.Val {
pat, ok := pattern.(String)
@@ -189,7 +194,7 @@ func (s String) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (s String) Value() interface{} {
+func (s String) Value() any {
return string(s)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/timestamp.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/timestamp.go
index 7513a1b210ad..c784f2e54be4 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/timestamp.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -89,7 +89,7 @@ func (t Timestamp) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the timestamp is already assignable to the desired type return it.
if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
return t.Time, nil
@@ -138,6 +138,11 @@ func (t Timestamp) Equal(other ref.Val) ref.Val {
return Bool(ok && t.Time.Equal(otherTime.Time))
}
+// IsZeroValue returns true if the timestamp is epoch 0.
+func (t Timestamp) IsZeroValue() bool {
+ return t.IsZero()
+}
+
// Receive implements traits.Receiver.Receive.
func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
switch len(args) {
@@ -160,14 +165,14 @@ func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
dur := subtrahend.(Duration)
val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return timestampOf(val)
case TimestampType:
t2 := subtrahend.(Timestamp).Time
val, err := subtractTimeChecked(t.Time, t2)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -180,7 +185,7 @@ func (t Timestamp) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (t Timestamp) Value() interface{} {
+func (t Timestamp) Value() any {
return t.Time
}
@@ -288,7 +293,7 @@ func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
if ind == -1 {
loc, err := time.LoadLocation(val)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return visitor(t.In(loc))
}
@@ -297,11 +302,11 @@ func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
// in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
hr, err := strconv.Atoi(string(val[0:ind]))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
min, err := strconv.Atoi(string(val[ind+1:]))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
var offset int
if string(val[0]) == "-" {
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
index 86e54af61a17..b19eb8301e27 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
@@ -20,6 +20,7 @@ go_library(
"receiver.go",
"sizer.go",
"traits.go",
+ "zeroer.go",
],
importpath = "github.com/google/cel-go/common/types/traits",
deps = [
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/coster.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
similarity index 50%
rename from cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/coster.go
rename to cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
index ac573d5745b8..0b7c830a2465 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/coster.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,24 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package interpreter
+package traits
-import "math"
-
-// TODO: remove Coster.
-
-// Coster calculates the heuristic cost incurred during evaluation.
-// Deprecated: Please migrate cel.EstimateCost, it supports length estimates for input data and cost estimates for
-// extension functions.
-type Coster interface {
- Cost() (min, max int64)
-}
-
-// estimateCost returns the heuristic cost interval for the program.
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+ // IsZeroValue indicates whether the object is the zero value for the type.
+ IsZeroValue() bool
}
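Here the deprecated interpreter/coster.go is repurposed into traits/zeroer.go, replacing the unused Coster interface with a Zeroer trait that the scalar and aggregate types updated above now implement. A short usage sketch under the same upstream-import assumption:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

// isZero reports whether a CEL value advertises itself as the zero value for
// its type via the new traits.Zeroer interface.
func isZero(v traits.Zeroer) bool {
	return v.IsZeroValue()
}

func main() {
	fmt.Println(isZero(types.IntZero))       // true
	fmt.Println(isZero(types.String("")))    // true
	fmt.Println(isZero(types.String("gke"))) // false
	fmt.Println(isZero(types.NullValue))     // true: null always reads as absent
}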
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/type.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/type.go
index 21160974bbfa..164a4605033a 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/type.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/type.go
@@ -53,7 +53,7 @@ func NewObjectTypeValue(name string) *TypeValue {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (any, error) {
// TODO: replace the internal type representation with a proto-value.
return nil, fmt.Errorf("type conversion not supported for 'type'")
}
@@ -97,6 +97,6 @@ func (t *TypeValue) TypeName() string {
}
// Value implements ref.Val.Value.
-func (t *TypeValue) Value() interface{} {
+func (t *TypeValue) Value() any {
return t.name
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/uint.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/uint.go
index ca266e045761..615c7ec5230b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/uint.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/uint.go
@@ -59,7 +59,7 @@ func (i Uint) Add(other ref.Val) ref.Val {
}
val, err := addUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -82,7 +82,7 @@ func (i Uint) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Uint) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Uint, reflect.Uint32:
v, err := uint64ToUint32Checked(uint64(i))
@@ -149,7 +149,7 @@ func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
case IntType:
v, err := uint64ToInt64Checked(uint64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(v)
case UintType:
@@ -172,7 +172,7 @@ func (i Uint) Divide(other ref.Val) ref.Val {
}
div, err := divideUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(div)
}
@@ -194,6 +194,11 @@ func (i Uint) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if the uint is zero.
+func (i Uint) IsZeroValue() bool {
+ return i == 0
+}
+
// Modulo implements traits.Modder.Modulo.
func (i Uint) Modulo(other ref.Val) ref.Val {
otherUint, ok := other.(Uint)
@@ -202,7 +207,7 @@ func (i Uint) Modulo(other ref.Val) ref.Val {
}
mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(mod)
}
@@ -215,7 +220,7 @@ func (i Uint) Multiply(other ref.Val) ref.Val {
}
val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -228,7 +233,7 @@ func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -239,7 +244,7 @@ func (i Uint) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (i Uint) Value() interface{} {
+func (i Uint) Value() any {
return uint64(i)
}
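With wrapErr exported as WrapErr, the checked-arithmetic paths above still surface overflow as CEL error values; only the helper's visibility changed. A small sketch of the observable behavior (canonical import path assumed):

package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types"
)

func main() {
	// Adding 1 to the largest uint64 overflows; Add reports this as an
	// error value rather than panicking or wrapping around.
	res := types.Uint(math.MaxUint64).Add(types.Uint(1))
	fmt.Println(types.IsError(res)) // true
}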
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/unknown.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/unknown.go
index 95b47426fd36..bc411c15b92b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/unknown.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -30,7 +30,7 @@ var (
)
// ConvertToNative implements ref.Val.ConvertToNative.
-func (u Unknown) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (u Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
return u.Value(), nil
}
@@ -50,7 +50,7 @@ func (u Unknown) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (u Unknown) Value() interface{} {
+func (u Unknown) Value() any {
return []int64(u)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel
index 9c2520b40849..2f1003aba88a 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -9,14 +9,29 @@ go_library(
srcs = [
"encoders.go",
"guards.go",
+ "math.go",
+ "native.go",
+ "protos.go",
"strings.go",
],
importpath = "github.com/google/cel-go/ext",
visibility = ["//visibility:public"],
deps = [
"//cel:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/overloads:go_default_library",
"//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_x_text//language:go_default_library",
+ "@org_golang_x_text//message:go_default_library",
],
)
@@ -25,6 +40,9 @@ go_test(
size = "small",
srcs = [
"encoders_test.go",
+ "math_test.go",
+ "native_test.go",
+ "protos_test.go",
"strings_test.go",
],
embed = [
@@ -32,5 +50,17 @@ go_test(
],
deps = [
"//cel:go_default_library",
+ "//checker:go_default_library",
+ "//common:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
],
)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md
index 5ddcc41510a8..c4faf59ab1d5 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md
@@ -31,6 +31,102 @@ Example:
base64.encode(b'hello') // return 'aGVsbG8='
+## Math
+
+Math returns a cel.EnvOption to configure namespaced math helper macros and
+functions.
+
+Note, all macros use the 'math' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'math', the macro will likely work just as
+intended; however, there is some chance for collision.
+
+### Math.Greatest
+
+Returns the greatest valued number present in the arguments to the macro.
+
+Greatest is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+ math.greatest(&lt;arg&gt;, ...) -> &lt;double|int|uint&gt;
+
+Examples:
+
+ math.greatest(1) // 1
+ math.greatest(1u, 2u) // 2u
+ math.greatest(-42.0, -21.5, -100.0) // -21.5
+ math.greatest([-42.0, -21.5, -100.0]) // -21.5
+ math.greatest(numbers) // numbers must be list(numeric)
+
+ math.greatest() // parse error
+ math.greatest('string') // parse error
+ math.greatest(a, b) // check-time error if a or b is non-numeric
+ math.greatest(dyn('string')) // runtime error
+
+### Math.Least
+
+Returns the least valued number present in the arguments to the macro.
+
+Least is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an error.
+
+ math.least(&lt;arg&gt;, ...) -> &lt;double|int|uint&gt;
+
+Examples:
+
+ math.least(1) // 1
+ math.least(1u, 2u) // 1u
+ math.least(-42.0, -21.5, -100.0) // -100.0
+ math.least([-42.0, -21.5, -100.0]) // -100.0
+ math.least(numbers) // numbers must be list(numeric)
+
+ math.least() // parse error
+ math.least('string') // parse error
+ math.least(a, b) // check-time error if a or b is non-numeric
+ math.least(dyn('string')) // runtime error
+
+## Protos
+
+Protos returns a cel.EnvOption to configure extended macros and functions for
+proto manipulation.
+
+Note, all macros use the 'proto' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'proto', the macro will likely work just as
+you intend; however, there is some chance for collision.
+
+### Protos.GetExt
+
+Macro which generates a select expression that retrieves an extension field
+from the input proto2 syntax message. If the field is not set, the default
+value for the extension field is returned according to safe-traversal semantics.
+
+ proto.getExt(&lt;msg&gt;, &lt;fully.qualified.extension.name&gt;) -> &lt;field-value&gt;
+
+Example:
+
+ proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+
+### Protos.HasExt
+
+Macro which generates a test-only select expression that determines whether
+an extension field is set on a proto2 syntax message.
+
+ proto.hasExt(&lt;msg&gt;, &lt;fully.qualified.extension.name&gt;) -> &lt;bool&gt;
+
+Example:
+
+ proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+
## Strings
Extended functions for string manipulation. As a general note, all indices are
@@ -70,6 +166,23 @@ Examples:
'hello mellow'.indexOf('ello', 2) // returns 7
'hello mellow'.indexOf('ello', 20) // error
+### Join
+
+Returns a new string where the elements of string list are concatenated.
+
+The function also accepts an optional separator which is placed between
+elements in the resulting string.
+
+ &lt;list&lt;string&gt;&gt;.join() -> &lt;string&gt;
+ &lt;list&lt;string&gt;&gt;.join(&lt;string&gt;) -> &lt;string&gt;
+
+Examples:
+
+ ['hello', 'mellow'].join() // returns 'hellomellow'
+ ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+ [].join() // returns ''
+ [].join('/') // returns ''
+
### LastIndexOf
Returns the integer index of the last occurrence of the search string. If the
@@ -105,6 +218,20 @@ Examples:
'TacoCat'.lowerAscii() // returns 'tacocat'
'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+### Quote
+
+**Introduced in version 1**
+
+Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+
+ strings.quote(&lt;string&gt;)
+
+Examples:
+
+ strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+ strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+
### Replace
Returns a new string based on the target, which replaces the occurrences of a
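The README additions above describe the new math, proto, and string helpers at the CEL expression level. A minimal sketch (not part of the patch itself) of wiring the string extensions into an environment and evaluating one of the documented expressions, assuming the canonical github.com/google/cel-go import path:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	// join() concatenates the list, then indexOf searches from position 2.
	ast, iss := env.Compile(`['hello', 'mellow'].join(' ').indexOf('ello', 2)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // 7 <nil>
}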
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/encoders.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/encoders.go
index 22e38c39f915..d9f9cb5152d8 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/encoders.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/encoders.go
@@ -26,34 +26,38 @@ import (
// Encoders returns a cel.EnvOption to configure extended functions for string, byte, and object
// encodings.
//
-// Base64.Decode
+// # Base64.Decode
//
// Decodes base64-encoded string to bytes.
//
// This function will return an error if the string input is not base64-encoded.
//
-// base64.decode(&lt;string&gt;) -> &lt;bytes&gt;
+// base64.decode(&lt;string&gt;) -> &lt;bytes&gt;
//
// Examples:
//
-// base64.decode('aGVsbG8=') // return b'hello'
-// base64.decode('aGVsbG8') // error
+// base64.decode('aGVsbG8=') // return b'hello'
+// base64.decode('aGVsbG8') // error
//
-// Base64.Encode
+// # Base64.Encode
//
// Encodes bytes to a base64-encoded string.
//
-// base64.encode(&lt;bytes&gt;) -> &lt;string&gt;
+// base64.encode(&lt;bytes&gt;) -> &lt;string&gt;
//
// Examples:
//
-// base64.encode(b'hello') // return b'aGVsbG8='
+// base64.encode(b'hello') // return b'aGVsbG8='
func Encoders() cel.EnvOption {
return cel.Lib(encoderLib{})
}
type encoderLib struct{}
+func (encoderLib) LibraryName() string {
+ return "cel.lib.ext.encoders"
+}
+
func (encoderLib) CompileOptions() []cel.EnvOption {
return []cel.EnvOption{
cel.Function("base64.decode",
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/guards.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/guards.go
index 0794f859b502..4c7786a690b2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/guards.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/guards.go
@@ -17,6 +17,7 @@ package ext
import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// function invocation guards for common call signatures within extension functions.
@@ -48,3 +49,15 @@ func listStringOrError(strs []string, err error) ref.Val {
}
return types.DefaultTypeAdapter.NativeToValue(strs)
}
+
+func macroTargetMatchesNamespace(ns string, target *exprpb.Expr) bool {
+ switch target.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ if target.GetIdentExpr().GetName() != ns {
+ return false
+ }
+ return true
+ default:
+ return false
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go
new file mode 100644
index 000000000000..79b0c8bf948d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go
@@ -0,0 +1,388 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Math returns a cel.EnvOption to configure namespaced math helper macros and
+// functions.
+//
+// Note, all macros use the 'math' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'math', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Math.Greatest
+//
+// Returns the greatest valued number present in the arguments to the macro.
+//
+// Greatest is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+// math.greatest(&lt;arg&gt;, ...) -> &lt;double|int|uint&gt;
+//
+// Examples:
+//
+// math.greatest(1) // 1
+// math.greatest(1u, 2u) // 2u
+// math.greatest(-42.0, -21.5, -100.0) // -21.5
+// math.greatest([-42.0, -21.5, -100.0]) // -21.5
+// math.greatest(numbers) // numbers must be list(numeric)
+//
+// math.greatest() // parse error
+// math.greatest('string') // parse error
+// math.greatest(a, b) // check-time error if a or b is non-numeric
+// math.greatest(dyn('string')) // runtime error
+//
+// # Math.Least
+//
+// Returns the least valued number present in the arguments to the macro.
+//
+// Least is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+// math.least(&lt;arg&gt;, ...) -> &lt;double|int|uint&gt;
+//
+// Examples:
+//
+// math.least(1) // 1
+// math.least(1u, 2u) // 1u
+// math.least(-42.0, -21.5, -100.0) // -100.0
+// math.least([-42.0, -21.5, -100.0]) // -100.0
+// math.least(numbers) // numbers must be list(numeric)
+//
+// math.least() // parse error
+// math.least('string') // parse error
+// math.least(a, b) // check-time error if a or b is non-numeric
+// math.least(dyn('string')) // runtime error
+func Math() cel.EnvOption {
+ return cel.Lib(mathLib{})
+}
+
+var (
+ mathNamespace = "math"
+ leastMacro = "least"
+ greatestMacro = "greatest"
+ minFunc = "math.@min"
+ maxFunc = "math.@max"
+)
+
+type mathLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (mathLib) LibraryName() string {
+ return "cel.lib.ext.math"
+}
+
+// CompileOptions implements the Library interface method.
+func (mathLib) CompileOptions() []cel.EnvOption {
+ return []cel.EnvOption{
+ cel.Macros(
+ // math.least(num, ...)
+ cel.NewReceiverVarArgMacro(leastMacro, mathLeast),
+ // math.greatest(num, ...)
+ cel.NewReceiverVarArgMacro(greatestMacro, mathGreatest),
+ ),
+ cel.Function(minFunc,
+ cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType,
+ cel.UnaryBinding(minList)),
+ cel.Overload("math_@min_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType,
+ cel.UnaryBinding(minList)),
+ cel.Overload("math_@min_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType,
+ cel.UnaryBinding(minList)),
+ ),
+ cel.Function(maxFunc,
+ cel.Overload("math_@max_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType,
+ cel.UnaryBinding(maxList)),
+ cel.Overload("math_@max_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType,
+ cel.UnaryBinding(maxList)),
+ cel.Overload("math_@max_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType,
+ cel.UnaryBinding(maxList)),
+ ),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (mathLib) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(mathNamespace, target) {
+ return nil, nil
+ }
+ switch len(args) {
+ case 0:
+ return nil, &common.Error{
+ Message: "math.least() requires at least one argument",
+ Location: meh.OffsetLocation(target.GetId()),
+ }
+ case 1:
+ if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ return meh.GlobalCall(minFunc, args[0]), nil
+ }
+ return nil, &common.Error{
+ Message: "math.least() invalid single argument value",
+ Location: meh.OffsetLocation(args[0].GetId()),
+ }
+ case 2:
+ err := checkInvalidArgs(meh, "math.least()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(minFunc, args...), nil
+ default:
+ err := checkInvalidArgs(meh, "math.least()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(minFunc, meh.NewList(args...)), nil
+ }
+}
+
+func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(mathNamespace, target) {
+ return nil, nil
+ }
+ switch len(args) {
+ case 0:
+ return nil, &common.Error{
+ Message: "math.greatest() requires at least one argument",
+ Location: meh.OffsetLocation(target.GetId()),
+ }
+ case 1:
+ if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ return meh.GlobalCall(maxFunc, args[0]), nil
+ }
+ return nil, &common.Error{
+ Message: "math.greatest() invalid single argument value",
+ Location: meh.OffsetLocation(args[0].GetId()),
+ }
+ case 2:
+ err := checkInvalidArgs(meh, "math.greatest()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(maxFunc, args...), nil
+ default:
+ err := checkInvalidArgs(meh, "math.greatest()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(maxFunc, meh.NewList(args...)), nil
+ }
+}
+
+func identity(val ref.Val) ref.Val {
+ return val
+}
+
+func minPair(first, second ref.Val) ref.Val {
+ cmp, ok := first.(traits.Comparer)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(first)
+ }
+ out := cmp.Compare(second)
+ if types.IsUnknownOrError(out) {
+ return maybeSuffixError(out, "math.@min")
+ }
+ if out == types.IntOne {
+ return second
+ }
+ return first
+}
+
+func minList(numList ref.Val) ref.Val {
+ l := numList.(traits.Lister)
+ size := l.Size().(types.Int)
+ if size == types.IntZero {
+ return types.NewErr("math.@min(list) argument must not be empty")
+ }
+ min := l.Get(types.IntZero)
+ for i := types.IntOne; i < size; i++ {
+ min = minPair(min, l.Get(i))
+ }
+ switch min.Type() {
+ case types.IntType, types.DoubleType, types.UintType, types.UnknownType:
+ return min
+ default:
+ return types.NewErr("no such overload: math.@min")
+ }
+}
+
+func maxPair(first, second ref.Val) ref.Val {
+ cmp, ok := first.(traits.Comparer)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(first)
+ }
+ out := cmp.Compare(second)
+ if types.IsUnknownOrError(out) {
+ return maybeSuffixError(out, "math.@max")
+ }
+ if out == types.IntNegOne {
+ return second
+ }
+ return first
+}
+
+func maxList(numList ref.Val) ref.Val {
+ l := numList.(traits.Lister)
+ size := l.Size().(types.Int)
+ if size == types.IntZero {
+ return types.NewErr("math.@max(list) argument must not be empty")
+ }
+ max := l.Get(types.IntZero)
+ for i := types.IntOne; i < size; i++ {
+ max = maxPair(max, l.Get(i))
+ }
+ switch max.Type() {
+ case types.IntType, types.DoubleType, types.UintType, types.UnknownType:
+ return max
+ default:
+ return types.NewErr("no such overload: math.@max")
+ }
+}
+
+func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *common.Error {
+ for _, arg := range args {
+ err := checkInvalidArgLiteral(funcName, arg)
+ if err != nil {
+ return &common.Error{
+ Message: err.Error(),
+ Location: meh.OffsetLocation(arg.GetId()),
+ }
+ }
+ }
+ return nil
+}
+
+func checkInvalidArgLiteral(funcName string, arg *exprpb.Expr) error {
+ if !isValidArgType(arg) {
+ return fmt.Errorf("%s simple literal arguments must be numeric", funcName)
+ }
+ return nil
+}
+
+func isValidArgType(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ c := arg.GetConstExpr()
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_DoubleValue, *exprpb.Constant_Int64Value, *exprpb.Constant_Uint64Value:
+ return true
+ default:
+ return false
+ }
+ case *exprpb.Expr_ListExpr, *exprpb.Expr_StructExpr:
+ return false
+ default:
+ return true
+ }
+}
+
+func isListLiteralWithValidArgs(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ListExpr:
+ list := arg.GetListExpr()
+ if len(list.GetElements()) == 0 {
+ return false
+ }
+ for _, e := range list.GetElements() {
+ if !isValidArgType(e) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func maybeSuffixError(val ref.Val, suffix string) ref.Val {
+ if types.IsError(val) {
+ msg := val.(*types.Err).String()
+ if !strings.Contains(msg, suffix) {
+ return types.NewErr("%s: %s", msg, suffix)
+ }
+ }
+ return val
+}
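A short sketch of the macros above in action: with more than two arguments math.greatest expands into a math.@max call over a list literal, and the list overloads fold it to a single value (canonical import path assumed):

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Math())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`math.greatest(-42.0, -21.5, -100.0)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // -21.5 <nil>
}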
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/native.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/native.go
new file mode 100644
index 000000000000..acbc44b6d519
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/native.go
@@ -0,0 +1,574 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ nativeObjTraitMask = traits.FieldTesterType | traits.IndexerType
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+)
+
+// NativeTypes creates a type provider which uses reflect.Type and reflect.Value instances
+// to produce type definitions that can be used within CEL.
+//
+// All struct types in Go are exposed to CEL via their simple package name and struct type name:
+//
+// ```go
+// package identity
+//
+// type Account struct {
+// ID int
+// }
+//
+// ```
+//
+// The type `identity.Account` would be exported to CEL using the same qualified name, e.g.
+// `identity.Account{ID: 1234}` would create a new `Account` instance with the `ID` field
+// populated.
+//
+// Only exported fields are exposed via NativeTypes, and the type-mapping between Go and CEL
+// is as follows:
+//
+// | Go type | CEL type |
+// |-------------------------------------|-----------|
+// | bool | bool |
+// | []byte | bytes |
+// | float32, float64 | double |
+// | int, int8, int16, int32, int64 | int |
+// | string | string |
+// | uint, uint8, uint16, uint32, uint64 | uint |
+// | time.Duration | duration |
+// | time.Time | timestamp |
+// | array, slice | list |
+// | map | map |
+//
+// Please note, if you intend to configure support for proto messages in addition to native
+// types, you will need to provide the protobuf types before the golang native types. The
+// same advice holds if you are using custom type adapters and type providers. The native type
+// provider composes over whichever type adapter and provider is configured in the cel.Env at
+// the time that it is invoked.
+func NativeTypes(refTypes ...any) cel.EnvOption {
+ return func(env *cel.Env) (*cel.Env, error) {
+ tp, err := newNativeTypeProvider(env.TypeAdapter(), env.TypeProvider(), refTypes...)
+ if err != nil {
+ return nil, err
+ }
+ env, err = cel.CustomTypeAdapter(tp)(env)
+ if err != nil {
+ return nil, err
+ }
+ return cel.CustomTypeProvider(tp)(env)
+ }
+}
+
+func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, refTypes ...any) (*nativeTypeProvider, error) {
+ nativeTypes := make(map[string]*nativeType, len(refTypes))
+ for _, refType := range refTypes {
+ switch rt := refType.(type) {
+ case reflect.Type:
+ t, err := newNativeType(rt)
+ if err != nil {
+ return nil, err
+ }
+ nativeTypes[t.TypeName()] = t
+ case reflect.Value:
+ t, err := newNativeType(rt.Type())
+ if err != nil {
+ return nil, err
+ }
+ nativeTypes[t.TypeName()] = t
+ default:
+ return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt)
+ }
+ }
+ return &nativeTypeProvider{
+ nativeTypes: nativeTypes,
+ baseAdapter: adapter,
+ baseProvider: provider,
+ }, nil
+}
+
+type nativeTypeProvider struct {
+ nativeTypes map[string]*nativeType
+ baseAdapter ref.TypeAdapter
+ baseProvider ref.TypeProvider
+}
+
+// EnumValue proxies to the ref.TypeProvider configured at the time the NativeTypes
+// option was configured.
+func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val {
+ return tp.baseProvider.EnumValue(enumName)
+}
+
+// FindIdent looks up native type instances by qualified identifier, and if not found
+// proxies to the composed ref.TypeProvider.
+func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) {
+ if t, found := tp.nativeTypes[typeName]; found {
+ return t, true
+ }
+ return tp.baseProvider.FindIdent(typeName)
+}
+
+// FindType looks up CEL type-checker type definition by qualified identifier, and if not found
+// proxies to the composed ref.TypeProvider.
+func (tp *nativeTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+ if _, found := tp.nativeTypes[typeName]; found {
+ return decls.NewTypeType(decls.NewObjectType(typeName)), true
+ }
+ return tp.baseProvider.FindType(typeName)
+}
+
+// FindFieldType looks up a native type's field definition, and if the type name is not a native
+// type then proxies to the composed ref.TypeProvider
+func (tp *nativeTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) {
+ t, found := tp.nativeTypes[typeName]
+ if !found {
+ return tp.baseProvider.FindFieldType(typeName, fieldName)
+ }
+ refField, isDefined := t.hasField(fieldName)
+ if !found || !isDefined {
+ return nil, false
+ }
+ exprType, ok := convertToExprType(refField.Type)
+ if !ok {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: exprType,
+ IsSet: func(obj any) bool {
+ refVal := reflect.Indirect(reflect.ValueOf(obj))
+ refField := refVal.FieldByName(fieldName)
+ return !refField.IsZero()
+ },
+ GetFrom: func(obj any) (any, error) {
+ refVal := reflect.Indirect(reflect.ValueOf(obj))
+ refField := refVal.FieldByName(fieldName)
+ return getFieldValue(tp, refField), nil
+ },
+ }, true
+}
+
+// NewValue implements the ref.TypeProvider interface method.
+func (tp *nativeTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+ t, found := tp.nativeTypes[typeName]
+ if !found {
+ return tp.baseProvider.NewValue(typeName, fields)
+ }
+ refPtr := reflect.New(t.refType)
+ refVal := refPtr.Elem()
+ for fieldName, val := range fields {
+ refFieldDef, isDefined := t.hasField(fieldName)
+ if !isDefined {
+ return types.NewErr("no such field: %s", fieldName)
+ }
+ fieldVal, err := val.ConvertToNative(refFieldDef.Type)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ refField := refVal.FieldByIndex(refFieldDef.Index)
+ refFieldVal := reflect.ValueOf(fieldVal)
+ refField.Set(refFieldVal)
+ }
+ return tp.NativeToValue(refPtr.Interface())
+}
+
+// NativeToValue adapts native values to CEL values and will proxy to the composed type adapter
+// for non-native types.
+func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
+ if val == nil {
+ return types.NullValue
+ }
+ if v, ok := val.(ref.Val); ok {
+ return v
+ }
+ rawVal := reflect.ValueOf(val)
+ refVal := rawVal
+ if refVal.Kind() == reflect.Ptr {
+ refVal = reflect.Indirect(refVal)
+ }
+ // This isn't quite right if you're also supporting proto,
+ // but maybe an acceptable limitation.
+ switch refVal.Kind() {
+ case reflect.Array, reflect.Slice:
+ switch val := val.(type) {
+ case []byte:
+ return tp.baseAdapter.NativeToValue(val)
+ default:
+ return types.NewDynamicList(tp, val)
+ }
+ case reflect.Map:
+ return types.NewDynamicMap(tp, val)
+ case reflect.Struct:
+ switch val := val.(type) {
+ case proto.Message, *pb.Map, protoreflect.List, protoreflect.Message, protoreflect.Value,
+ time.Time:
+ return tp.baseAdapter.NativeToValue(val)
+ default:
+ return newNativeObject(tp, val, rawVal)
+ }
+ default:
+ return tp.baseAdapter.NativeToValue(val)
+ }
+}
+
+// convertToExprType converts the Golang reflect.Type to a protobuf exprpb.Type.
+func convertToExprType(refType reflect.Type) (*exprpb.Type, bool) {
+ switch refType.Kind() {
+ case reflect.Bool:
+ return decls.Bool, true
+ case reflect.Float32, reflect.Float64:
+ return decls.Double, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if refType == durationType {
+ return decls.Duration, true
+ }
+ return decls.Int, true
+ case reflect.String:
+ return decls.String, true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return decls.Uint, true
+ case reflect.Array, reflect.Slice:
+ refElem := refType.Elem()
+ if refElem == reflect.TypeOf(byte(0)) {
+ return decls.Bytes, true
+ }
+ elemType, ok := convertToExprType(refElem)
+ if !ok {
+ return nil, false
+ }
+ return decls.NewListType(elemType), true
+ case reflect.Map:
+ keyType, ok := convertToExprType(refType.Key())
+ if !ok {
+ return nil, false
+ }
+ // Ensure the key type is an int, bool, uint, or string
+ elemType, ok := convertToExprType(refType.Elem())
+ if !ok {
+ return nil, false
+ }
+ return decls.NewMapType(keyType, elemType), true
+ case reflect.Struct:
+ if refType == timestampType {
+ return decls.Timestamp, true
+ }
+ return decls.NewObjectType(
+ fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+ ), true
+ case reflect.Pointer:
+ if refType.Implements(pbMsgInterfaceType) {
+ pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage)
+ return decls.NewObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true
+ }
+ return convertToExprType(refType.Elem())
+ }
+ return nil, false
+}
+
+func newNativeObject(adapter ref.TypeAdapter, val any, refValue reflect.Value) ref.Val {
+ valType, err := newNativeType(refValue.Type())
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return &nativeObj{
+ TypeAdapter: adapter,
+ val: val,
+ valType: valType,
+ refValue: refValue,
+ }
+}
+
+type nativeObj struct {
+ ref.TypeAdapter
+ val any
+ valType *nativeType
+ refValue reflect.Value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// CEL does not have a notion of pointers, so whether a field is a pointer or value
+// is handled as part of this conversion step.
+func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if o.refValue.Type() == typeDesc {
+ return o.val, nil
+ }
+ if o.refValue.Kind() == reflect.Pointer && o.refValue.Type().Elem() == typeDesc {
+ return o.refValue.Elem().Interface(), nil
+ }
+ if typeDesc.Kind() == reflect.Pointer && o.refValue.Type() == typeDesc.Elem() {
+ ptr := reflect.New(typeDesc.Elem())
+ ptr.Elem().Set(o.refValue)
+ return ptr.Interface(), nil
+ }
+ switch typeDesc {
+ case jsonValueType:
+ jsonStruct, err := o.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return structpb.NewStructValue(jsonStruct.(*structpb.Struct)), nil
+ case jsonStructType:
+ refVal := reflect.Indirect(o.refValue)
+ refType := refVal.Type()
+ fields := make(map[string]*structpb.Value, refVal.NumField())
+ for i := 0; i < refVal.NumField(); i++ {
+ fieldType := refType.Field(i)
+ fieldValue := refVal.Field(i)
+ if !fieldValue.IsValid() || fieldValue.IsZero() {
+ continue
+ }
+ fieldCELVal := o.NativeToValue(fieldValue.Interface())
+ fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ fields[fieldType.Name] = fieldJSONVal.(*structpb.Value)
+ }
+ return &structpb.Struct{Fields: fields}, nil
+ }
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'", o.Type(), typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *nativeObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case types.TypeType:
+ return o.valType
+ default:
+ if typeVal.TypeName() == o.valType.typeName {
+ return o
+ }
+ }
+ return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+//
+// Note that in Golang a pointer to a value is not equal to the value it contains.
+// In CEL pointers and values to which they point are equal.
+func (o *nativeObj) Equal(other ref.Val) ref.Val {
+ otherNtv, ok := other.(*nativeObj)
+ if !ok {
+ return types.False
+ }
+ val := o.val
+ otherVal := otherNtv.val
+ refVal := o.refValue
+ otherRefVal := otherNtv.refValue
+ if refVal.Kind() != otherRefVal.Kind() {
+ if refVal.Kind() == reflect.Pointer {
+ val = refVal.Elem().Interface()
+ } else if otherRefVal.Kind() == reflect.Pointer {
+ otherVal = otherRefVal.Elem().Interface()
+ }
+ }
+ return types.Bool(reflect.DeepEqual(val, otherVal))
+}
+
+// IsZeroValue indicates whether the contained Golang value is a zero value.
+//
+// Golang largely follows proto3 semantics for zero values.
+func (o *nativeObj) IsZeroValue() bool {
+ return reflect.Indirect(o.refValue).IsZero()
+}
+
+// IsSet tests whether a field which is defined is set to a non-default value.
+func (o *nativeObj) IsSet(field ref.Val) ref.Val {
+ refField, refErr := o.getReflectedField(field)
+ if refErr != nil {
+ return refErr
+ }
+ return types.Bool(!refField.IsZero())
+}
+
+// Get returns the value of the named field.
+func (o *nativeObj) Get(field ref.Val) ref.Val {
+ refField, refErr := o.getReflectedField(field)
+ if refErr != nil {
+ return refErr
+ }
+ return adaptFieldValue(o, refField)
+}
+
+func (o *nativeObj) getReflectedField(field ref.Val) (reflect.Value, ref.Val) {
+ fieldName, ok := field.(types.String)
+ if !ok {
+ return reflect.Value{}, types.MaybeNoSuchOverloadErr(field)
+ }
+ fieldNameStr := string(fieldName)
+ refField, isDefined := o.valType.hasField(fieldNameStr)
+ if !isDefined {
+ return reflect.Value{}, types.NewErr("no such field: %s", fieldName)
+ }
+ refVal := reflect.Indirect(o.refValue)
+ return refVal.FieldByIndex(refField.Index), nil
+}
+
+// Type implements the ref.Val interface method.
+func (o *nativeObj) Type() ref.Type {
+ return o.valType
+}
+
+// Value implements the ref.Val interface method.
+func (o *nativeObj) Value() any {
+ return o.val
+}
+
+func newNativeType(rawType reflect.Type) (*nativeType, error) {
+ refType := rawType
+ if refType.Kind() == reflect.Pointer {
+ refType = refType.Elem()
+ }
+ if !isValidObjectType(refType) {
+ return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType)
+ }
+ return &nativeType{
+ typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+ refType: refType,
+ }, nil
+}
+
+type nativeType struct {
+ typeName string
+ refType reflect.Type
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *nativeType) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion error for type to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *nativeType) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case types.TypeType:
+ return types.TypeType
+ }
+ return types.NewErr("type conversion error from '%s' to '%s'", types.TypeType, typeVal)
+}
+
+// Equal returns true if both type names are equal to each other.
+func (t *nativeType) Equal(other ref.Val) ref.Val {
+ otherType, ok := other.(ref.Type)
+ return types.Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *nativeType) HasTrait(trait int) bool {
+ return nativeObjTraitMask&trait == trait
+}
+
+// String implements the strings.Stringer interface method.
+func (t *nativeType) String() string {
+ return t.typeName
+}
+
+// Type implements the ref.Val interface method.
+func (t *nativeType) Type() ref.Type {
+ return types.TypeType
+}
+
+// TypeName implements the ref.Type interface method.
+func (t *nativeType) TypeName() string {
+ return t.typeName
+}
+
+// Value implements the ref.Val interface method.
+func (t *nativeType) Value() any {
+ return t.typeName
+}
+
+// hasField returns whether a field name has a corresponding Golang reflect.StructField
+func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
+ f, found := t.refType.FieldByName(fieldName)
+ if !found || !f.IsExported() || !isSupportedType(f.Type) {
+ return reflect.StructField{}, false
+ }
+ return f, true
+}
+
+func adaptFieldValue(adapter ref.TypeAdapter, refField reflect.Value) ref.Val {
+ return adapter.NativeToValue(getFieldValue(adapter, refField))
+}
+
+func getFieldValue(adapter ref.TypeAdapter, refField reflect.Value) any {
+ if refField.IsZero() {
+ switch refField.Kind() {
+ case reflect.Array, reflect.Slice:
+ return types.NewDynamicList(adapter, []ref.Val{})
+ case reflect.Map:
+ return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{})
+ case reflect.Struct:
+ if refField.Type() == timestampType {
+ return types.Timestamp{Time: time.Unix(0, 0)}
+ }
+ return reflect.New(refField.Type()).Elem().Interface()
+ case reflect.Pointer:
+ return reflect.New(refField.Type().Elem()).Interface()
+ }
+ }
+ return refField.Interface()
+}
+
+func simplePkgAlias(pkgPath string) string {
+ paths := strings.Split(pkgPath, "/")
+ if len(paths) == 0 {
+ return ""
+ }
+ return paths[len(paths)-1]
+}
+
+func isValidObjectType(refType reflect.Type) bool {
+ return refType.Kind() == reflect.Struct
+}
+
+func isSupportedType(refType reflect.Type) bool {
+ switch refType.Kind() {
+ case reflect.Chan, reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer, reflect.Uintptr:
+ return false
+ case reflect.Array, reflect.Slice:
+ return isSupportedType(refType.Elem())
+ case reflect.Map:
+ return isSupportedType(refType.Key()) && isSupportedType(refType.Elem())
+ }
+ return true
+}
+
+var (
+ pbMsgInterfaceType = reflect.TypeOf((*protoreflect.ProtoMessage)(nil)).Elem()
+ timestampType = reflect.TypeOf(time.Now())
+ durationType = reflect.TypeOf(time.Nanosecond)
+)
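A minimal sketch of the native type provider in use. The Account struct and variable name are illustrative only; the declaration uses the long-standing cel.Declarations/decls API, and a main-package struct is registered under the name main.Account per simplePkgAlias above:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
	"github.com/google/cel-go/ext"
)

// Account is a plain Go struct exposed to CEL as 'main.Account'.
type Account struct {
	ID    int64
	Email string
}

func main() {
	env, err := cel.NewEnv(
		ext.NativeTypes(reflect.TypeOf(Account{})),
		cel.Declarations(decls.NewVar("acct", decls.NewObjectType("main.Account"))),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`acct.ID != 0 && acct.Email.endsWith("@example.com")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"acct": Account{ID: 42, Email: "user@example.com"}})
	fmt.Println(out, err) // true <nil>
}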
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/protos.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/protos.go
new file mode 100644
index 000000000000..b905e710c14f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/protos.go
@@ -0,0 +1,145 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Protos returns a cel.EnvOption to configure extended macros and functions for
+// proto manipulation.
+//
+// Note, all macros use the 'proto' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'proto', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Protos.GetExt
+//
+// Macro which generates a select expression that retrieves an extension field
+// from the input proto2 syntax message. If the field is not set, the default
+// value for the extension field is returned according to safe-traversal semantics.
+//
+// proto.getExt(&lt;msg&gt;, &lt;fully.qualified.extension.name&gt;) -> &lt;field-value&gt;
+//
+// Examples:
+//
+// proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+//
+// # Protos.HasExt
+//
+// Macro which generates a test-only select expression that determines whether
+// an extension field is set on a proto2 syntax message.
+//
+// proto.hasExt(&lt;msg&gt;, &lt;fully.qualified.extension.name&gt;) -> &lt;bool&gt;
+//
+// Examples:
+//
+// proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+func Protos() cel.EnvOption {
+ return cel.Lib(protoLib{})
+}
+
+var (
+ protoNamespace = "proto"
+ hasExtension = "hasExt"
+ getExtension = "getExt"
+)
+
+type protoLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (protoLib) LibraryName() string {
+ return "cel.lib.ext.protos"
+}
+
+// CompileOptions implements the Library interface method.
+func (protoLib) CompileOptions() []cel.EnvOption {
+ return []cel.EnvOption{
+ cel.Macros(
+ // proto.getExt(msg, select_expression)
+ cel.NewReceiverMacro(getExtension, 2, getProtoExt),
+ // proto.hasExt(msg, select_expression)
+ cel.NewReceiverMacro(hasExtension, 2, hasProtoExt),
+ ),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (protoLib) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message.
+func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(protoNamespace, target) {
+ return nil, nil
+ }
+ extensionField, err := getExtFieldName(meh, args[1])
+ if err != nil {
+ return nil, err
+ }
+ return meh.PresenceTest(args[0], extensionField), nil
+}
+
+// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
+func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(protoNamespace, target) {
+ return nil, nil
+ }
+ extFieldName, err := getExtFieldName(meh, args[1])
+ if err != nil {
+ return nil, err
+ }
+ return meh.Select(args[0], extFieldName), nil
+}
+
+func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *common.Error) {
+ isValid := false
+ extensionField := ""
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ extensionField, isValid = validateIdentifier(expr)
+ }
+ if !isValid {
+ return "", &common.Error{
+ Message: "invalid extension field",
+ Location: meh.OffsetLocation(expr.GetId()),
+ }
+ }
+ return extensionField, nil
+}
+
+func validateIdentifier(expr *exprpb.Expr) (string, bool) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ return expr.GetIdentExpr().GetName(), true
+ case *exprpb.Expr_SelectExpr:
+ sel := expr.GetSelectExpr()
+ if sel.GetTestOnly() {
+ return "", false
+ }
+ opStr, isIdent := validateIdentifier(sel.GetOperand())
+ if !isIdent {
+ return "", false
+ }
+ return opStr + "." + sel.GetField(), true
+ default:
+ return "", false
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go
index 6ce239ac2b2e..2962eb663071 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go
@@ -19,32 +19,92 @@ package ext
import (
"fmt"
+ "math"
"reflect"
+ "sort"
"strings"
"unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/language"
+ "golang.org/x/text/message"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
+)
+
+const (
+ defaultLocale = "en-US"
+ defaultPrecision = 6
)
// Strings returns a cel.EnvOption to configure extended functions for string manipulation.
// As a general note, all indices are zero-based.
//
-// CharAt
+// # CharAt
//
// Returns the character at the given position. If the position is negative, or greater than
// the length of the string, the function will produce an error:
//
-// &lt;string&gt;.charAt(&lt;int&gt;) -> &lt;string&gt;
+// &lt;string&gt;.charAt(&lt;int&gt;) -> &lt;string&gt;
//
// Examples:
//
-// 'hello'.charAt(4) // return 'o'
-// 'hello'.charAt(5) // return ''
-// 'hello'.charAt(-1) // error
+// 'hello'.charAt(4) // return 'o'
+// 'hello'.charAt(5) // return ''
+// 'hello'.charAt(-1) // error
+//
+// # Format
+//
+// Introduced at version: 1
+//
+// Returns a new string with substitutions being performed, printf-style.
+// The valid formatting clauses are:
+//
+// `%s` - substitutes a string. This can also be used on bools, lists, maps, bytes,
+// Duration and Timestamp, in addition to all numerical types (int, uint, and double).
+// Note that the dot/period decimal separator will always be used when printing a list
+// or map that contains a double, and that null can be passed (which results in the
+// string "null") in addition to types.
+// `%d` - substitutes an integer.
+// `%f` - substitutes a double with fixed-point precision. The default precision is 6, but
+// this can be adjusted. The strings `Infinity`, `-Infinity`, and `NaN` are also valid input
+// for this clause.
+// `%e` - substitutes a double in scientific notation. The default precision is 6, but this
+// can be adjusted.
+// `%b` - substitutes an integer with its equivalent binary string. Can also be used on bools.
+// `%x` - substitutes an integer with its equivalent in hexadecimal, or if given a string or
+// bytes, will output each character's equivalent in hexadecimal.
+// `%X` - same as above, but with A-F capitalized.
+// `%o` - substitutes an integer with its equivalent in octal.
+//
+// &lt;string&gt;.format(&lt;list&gt;) -> &lt;string&gt;
+//
+// Examples:
//
-// IndexOf
+// "this is a string: %s\nand an integer: %d".format(["str", 42]) // returns "this is a string: str\nand an integer: 42"
+// "a double substituted with %%s: %s".format([64.2]) // returns "a double substituted with %s: 64.2"
+// "string type: %s".format([type(string)]) // returns "string type: string"
+// "timestamp: %s".format([timestamp("2023-02-03T23:31:20+00:00")]) // returns "timestamp: 2023-02-03T23:31:20Z"
+// "duration: %s".format([duration("1h45m47s")]) // returns "duration: 6347s"
+// "%f".format([3.14]) // returns "3.140000"
+// "scientific notation: %e".format([2.71828]) // returns "scientific notation: 2.718280\u202f\u00d7\u202f10\u2070\u2070"
+// "5 in binary: %b".format([5]), // returns "5 in binary; 101"
+// "26 in hex: %x".format([26]), // returns "26 in hex: 1a"
+// "26 in hex (uppercase): %X".format([26]) // returns "26 in hex (uppercase): 1A"
+// "30 in octal: %o".format([30]) // returns "30 in octal: 36"
+// "a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"d"}]"
+// "true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1"
+//
+// Passing an incorrect type (an integer to `%s`) is considered an error, as well as attempting
+// to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance).
+// If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal,
+// then letting any arguments go unused/unformatted is also considered an error.
+//
+// # IndexOf
//
// Returns the integer index of the first occurrence of the search string. If the search string is
// not found the function returns -1.
@@ -52,19 +112,19 @@ import (
// The function also accepts an optional position from which to begin the substring search. If the
// substring is the empty string, the index where the search starts is returned (zero or custom).
//
-// &lt;string&gt;.indexOf(&lt;string&gt;) -> &lt;int&gt;
-// &lt;string&gt;.indexOf(&lt;string&gt;, &lt;int&gt;) -> &lt;int&gt;
+// &lt;string&gt;.indexOf(&lt;string&gt;) -> &lt;int&gt;
+// &lt;string&gt;.indexOf(&lt;string&gt;, &lt;int&gt;) -> &lt;int&gt;
//
// Examples:
//
-// 'hello mellow'.indexOf('') // returns 0
-// 'hello mellow'.indexOf('ello') // returns 1
-// 'hello mellow'.indexOf('jello') // returns -1
-// 'hello mellow'.indexOf('', 2) // returns 2
-// 'hello mellow'.indexOf('ello', 2) // returns 7
-// 'hello mellow'.indexOf('ello', 20) // error
+// 'hello mellow'.indexOf('') // returns 0
+// 'hello mellow'.indexOf('ello') // returns 1
+// 'hello mellow'.indexOf('jello') // returns -1
+// 'hello mellow'.indexOf('', 2) // returns 2
+// 'hello mellow'.indexOf('ello', 2) // returns 7
+// 'hello mellow'.indexOf('ello', 20) // error
//
-// Join
+// # Join
//
// Returns a new string where the elements of string list are concatenated.
//
@@ -75,12 +135,12 @@ import (
//
// Examples:
//
-// ['hello', 'mellow'].join() // returns 'hellomellow'
-// ['hello', 'mellow'].join(' ') // returns 'hello mellow'
-// [].join() // returns ''
-// [].join('/') // returns ''
+// ['hello', 'mellow'].join() // returns 'hellomellow'
+// ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+// [].join() // returns ''
+// [].join('/') // returns ''
//
-// LastIndexOf
+// # LastIndexOf
//
// Returns the integer index at the start of the last occurrence of the search string. If the
// search string is not found the function returns -1.
@@ -89,31 +149,45 @@ import (
// considered as the beginning of the substring match. If the substring is the empty string,
// the index where the search starts is returned (string length or custom).
//
-// &lt;string&gt;.lastIndexOf(&lt;string&gt;) -> &lt;int&gt;
-// &lt;string&gt;.lastIndexOf(&lt;string&gt;, &lt;int&gt;) -> &lt;int&gt;
+// &lt;string&gt;.lastIndexOf(&lt;string&gt;) -> &lt;int&gt;
+// &lt;string&gt;.lastIndexOf(&lt;string&gt;, &lt;int&gt;) -> &lt;int&gt;
//
// Examples:
//
-// 'hello mellow'.lastIndexOf('') // returns 12
-// 'hello mellow'.lastIndexOf('ello') // returns 7
-// 'hello mellow'.lastIndexOf('jello') // returns -1
-// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
-// 'hello mellow'.lastIndexOf('ello', -1) // error
+// 'hello mellow'.lastIndexOf('') // returns 12
+// 'hello mellow'.lastIndexOf('ello') // returns 7
+// 'hello mellow'.lastIndexOf('jello') // returns -1
+// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
+// 'hello mellow'.lastIndexOf('ello', -1) // error
//
-// LowerAscii
+// # LowerAscii
//
// Returns a new string where all ASCII characters are lower-cased.
//
// This function does not perform Unicode case-mapping for characters outside the ASCII range.
//
-// &lt;string&gt;.lowerAscii() -> &lt;string&gt;
+// &lt;string&gt;.lowerAscii() -> &lt;string&gt;
+//
+// Examples:
+//
+// 'TacoCat'.lowerAscii() // returns 'tacocat'
+// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+//
+// # Quote
+//
+// Introduced in version: 1
+//
+// Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+// If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+//
+// strings.quote(&lt;string&gt;)
//
// Examples:
//
-// 'TacoCat'.lowerAscii() // returns 'tacocat'
-// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+// strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+// strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
//
-// Replace
+// # Replace
//
// Returns a new string based on the target, which replaces the occurrences of a search string
// with a replacement string if present. The function accepts an optional limit on the number of
@@ -122,17 +196,17 @@ import (
// When the replacement limit is 0, the result is the original string. When the limit is a negative
// number, the function behaves the same as replace all.
//
-// &lt;string&gt;.replace(&lt;string&gt;, &lt;string&gt;) -> &lt;string&gt;
-// &lt;string&gt;.replace(&lt;string&gt;, &lt;string&gt;, &lt;int&gt;) -> &lt;string&gt;
+// &lt;string&gt;.replace(&lt;string&gt;, &lt;string&gt;) -> &lt;string&gt;
+// &lt;string&gt;.replace(&lt;string&gt;, &lt;string&gt;, &lt;int&gt;) -> &lt;string&gt;
//
// Examples:
//
-// 'hello hello'.replace('he', 'we') // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
-// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+// 'hello hello'.replace('he', 'we') // returns 'wello wello'
+// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
//
-// Split
+// # Split
//
// Returns a list of strings split from the input by the given separator. The function accepts
// an optional argument specifying a limit on the number of substrings produced by the split.
@@ -141,18 +215,18 @@ import (
// target string to split. When the limit is a negative number, the function behaves the same as
// split all.
//
-// <string>.split(<string>) -> <list<string>>
-// <string>.split(<string>, <int>) -> <list<string>>
+// <string>.split(<string>) -> <list<string>>
+// <string>.split(<string>, <int>) -> <list<string>>
//
// Examples:
//
-// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
-// 'hello hello hello'.split(' ', 0) // returns []
-// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
-// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
-// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+// 'hello hello hello'.split(' ', 0) // returns []
+// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
//
-// Substring
+// # Substring
//
// Returns the substring given a numeric range corresponding to character positions. Optionally
// may omit the trailing range for a substring from a given character position until the end of
@@ -162,48 +236,102 @@ import (
// error to specify an end range that is lower than the start range, or for either the start or end
// index to be negative or exceed the string length.
//
-// <string>.substring(<int>) -> <string>
-// <string>.substring(<int>, <int>) -> <string>
+// <string>.substring(<int>) -> <string>
+// <string>.substring(<int>, <int>) -> <string>
//
// Examples:
//
-// 'tacocat'.substring(4) // returns 'cat'
-// 'tacocat'.substring(0, 4) // returns 'taco'
-// 'tacocat'.substring(-1) // error
-// 'tacocat'.substring(2, 1) // error
+// 'tacocat'.substring(4) // returns 'cat'
+// 'tacocat'.substring(0, 4) // returns 'taco'
+// 'tacocat'.substring(-1) // error
+// 'tacocat'.substring(2, 1) // error
//
-// Trim
+// # Trim
//
// Returns a new string which removes the leading and trailing whitespace in the target string.
// The trim function uses the Unicode definition of whitespace which does not include the
// zero-width spaces. See: https://en.wikipedia.org/wiki/Whitespace_character#Unicode
//
-// <string>.trim() -> <string>
+// <string>.trim() -> <string>
//
// Examples:
//
-// ' \ttrim\n '.trim() // returns 'trim'
+// ' \ttrim\n '.trim() // returns 'trim'
//
-// UpperAscii
+// # UpperAscii
//
// Returns a new string where all ASCII characters are upper-cased.
//
// This function does not perform Unicode case-mapping for characters outside the ASCII range.
//
-// <string>.upperAscii() -> <string>
+// <string>.upperAscii() -> <string>
//
// Examples:
//
-// 'TacoCat'.upperAscii() // returns 'TACOCAT'
-// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
-func Strings() cel.EnvOption {
- return cel.Lib(stringLib{})
+// 'TacoCat'.upperAscii() // returns 'TACOCAT'
+// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+func Strings(options ...StringsOption) cel.EnvOption {
+ s := &stringLib{version: math.MaxUint32}
+ for _, o := range options {
+ s = o(s)
+ }
+ return cel.Lib(s)
+}
+
+type stringLib struct {
+ locale string
+ version uint32
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (*stringLib) LibraryName() string {
+ return "cel.lib.ext.strings"
+}
+
+// StringsOption is a functional interface for configuring the strings library.
+type StringsOption func(*stringLib) *stringLib
+
+// StringsLocale configures the library with the given locale. The locale tag will
+// be checked for validity at the time that EnvOptions are configured. If this option
+// is not passed, string.format will behave as if en_US was passed as the locale.
+func StringsLocale(locale string) StringsOption {
+ return func(sl *stringLib) *stringLib {
+ sl.locale = locale
+ return sl
+ }
}
-type stringLib struct{}
+// StringsVersion configures the version of the string library. The version limits which
+// functions are available. Only functions introduced at or below the given
+// version are included in the library. See the library documentation to determine
+// which version a function was introduced at. If the documentation does not
+// state which version a function was introduced at, it can be assumed to be
+// introduced at version 0, when the library was first created.
+// If this option is not set, all functions are available.
+func StringsVersion(version uint32) func(lib *stringLib) *stringLib {
+ return func(sl *stringLib) *stringLib {
+ sl.version = version
+ return sl
+ }
+}
-func (stringLib) CompileOptions() []cel.EnvOption {
- return []cel.EnvOption{
+// CompileOptions implements the Library interface method.
+func (sl *stringLib) CompileOptions() []cel.EnvOption {
+ formatLocale := "en_US"
+ if sl.locale != "" {
+ // ensure locale is properly-formed if set
+ _, err := language.Parse(sl.locale)
+ if err != nil {
+ return []cel.EnvOption{
+ func(e *cel.Env) (*cel.Env, error) {
+ return nil, fmt.Errorf("failed to parse locale: %w", err)
+ },
+ }
+ }
+ formatLocale = sl.locale
+ }
+
+ opts := []cel.EnvOption{
cel.Function("charAt",
cel.MemberOverload("string_char_at_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType,
cel.BinaryBinding(func(str, ind ref.Val) ref.Val {
@@ -322,9 +450,26 @@ func (stringLib) CompileOptions() []cel.EnvOption {
return stringOrError(joinSeparator(l.([]string), string(d)))
}))),
}
+ if sl.version >= 1 {
+ opts = append(opts, cel.Function("format",
+ cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType,
+ cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+ s := args[0].(types.String).Value().(string)
+ formatArgs := args[1].(traits.Lister)
+ return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale))
+ }))),
+ cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType,
+ cel.UnaryBinding(func(str ref.Val) ref.Val {
+ s := str.(types.String)
+ return stringOrError(quote(string(s)))
+ }))))
+
+ }
+ return opts
}
-func (stringLib) ProgramOptions() []cel.ProgramOption {
+// ProgramOptions implements the Library interface method.
+func (*stringLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
@@ -478,6 +623,435 @@ func join(strs []string) (string, error) {
return strings.Join(strs, ""), nil
}
+type clauseImpl func(ref.Val, string) (string, error)
+
+func clauseForType(argType ref.Type) (clauseImpl, error) {
+ switch argType {
+ case types.IntType, types.UintType:
+ return formatDecimal, nil
+ case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType:
+ return FormatString, nil
+ case types.TimestampType, types.DurationType:
+ // special case to ensure timestamps/durations get printed as CEL literals
+ return func(arg ref.Val, locale string) (string, error) {
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr := argStrVal.Value().(string)
+ if arg.Type() == types.TimestampType {
+ return fmt.Sprintf("timestamp(%q)", argStr), nil
+ }
+ if arg.Type() == types.DurationType {
+ return fmt.Sprintf("duration(%q)", argStr), nil
+ }
+ return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName())
+ }, nil
+ case types.ListType:
+ return formatList, nil
+ case types.MapType:
+ return formatMap, nil
+ case types.DoubleType:
+ // avoid formatFixed so we can output a period as the decimal separator in order
+ // to always be a valid CEL literal
+ return func(arg ref.Val, locale string) (string, error) {
+ argDouble, ok := arg.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", defaultPrecision)
+ return fmt.Sprintf(fmtStr, argDouble), nil
+ }, nil
+ case types.TypeType:
+ return func(arg ref.Val, locale string) (string, error) {
+ return fmt.Sprintf("type(%s)", arg.Value().(string)), nil
+ }, nil
+ default:
+ return nil, fmt.Errorf("no formatting function for %s", argType.TypeName())
+ }
+}
+
+func formatList(arg ref.Val, locale string) (string, error) {
+ argList := arg.(traits.Lister)
+ argIterator := argList.Iterator()
+ var listStrBuilder strings.Builder
+ _, err := listStrBuilder.WriteRune('[')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ for argIterator.HasNext() == types.True {
+ member := argIterator.Next()
+ memberFormat, err := clauseForType(member.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedStr, err := memberFormat(member, locale)
+ if err != nil {
+ return "", err
+ }
+ str := quoteForCEL(member, unquotedStr)
+ _, err = listStrBuilder.WriteString(str)
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ if argIterator.HasNext() == types.True {
+ _, err = listStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ }
+ }
+ _, err = listStrBuilder.WriteRune(']')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ return listStrBuilder.String(), nil
+}
+
+func formatMap(arg ref.Val, locale string) (string, error) {
+ argMap := arg.(traits.Mapper)
+ argIterator := argMap.Iterator()
+ type mapPair struct {
+ key string
+ value string
+ }
+ argPairs := make([]mapPair, argMap.Size().Value().(int64))
+ i := 0
+ for argIterator.HasNext() == types.True {
+ key := argIterator.Next()
+ var keyFormat clauseImpl
+ switch key.Type() {
+ case types.StringType, types.BoolType:
+ keyFormat = FormatString
+ case types.IntType, types.UintType:
+ keyFormat = formatDecimal
+ default:
+ return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName())
+ }
+ unquotedKeyStr, err := keyFormat(key, locale)
+ if err != nil {
+ return "", err
+ }
+ keyStr := quoteForCEL(key, unquotedKeyStr)
+ value, found := argMap.Find(key)
+ if !found {
+ return "", fmt.Errorf("could not find key: %q", key)
+ }
+ valueFormat, err := clauseForType(value.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedValueStr, err := valueFormat(value, locale)
+ if err != nil {
+ return "", err
+ }
+ valueStr := quoteForCEL(value, unquotedValueStr)
+ argPairs[i] = mapPair{keyStr, valueStr}
+ i++
+ }
+ sort.SliceStable(argPairs, func(x, y int) bool {
+ return argPairs[x].key < argPairs[y].key
+ })
+ var mapStrBuilder strings.Builder
+ _, err := mapStrBuilder.WriteRune('{')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ for i, entry := range argPairs {
+ _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value))
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ if i < len(argPairs)-1 {
+ _, err = mapStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ }
+ }
+ _, err = mapStrBuilder.WriteRune('}')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ return mapStrBuilder.String(), nil
+}
+
+// quoteForCEL takes a formatted, unquoted value and quotes it in a manner
+// suitable for embedding directly in CEL.
+func quoteForCEL(refVal ref.Val, unquotedValue string) string {
+ switch refVal.Type() {
+ case types.StringType:
+ return fmt.Sprintf("%q", unquotedValue)
+ case types.BytesType:
+ return fmt.Sprintf("b%q", unquotedValue)
+ case types.DoubleType:
+ // special case to handle infinity/NaN
+ num := refVal.Value().(float64)
+ if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) {
+ return fmt.Sprintf("%q", unquotedValue)
+ }
+ return unquotedValue
+ default:
+ return unquotedValue
+ }
+}
+
+// FormatString returns the string representation of a CEL value.
+// It is used to implement the %s specifier in the (string).format() extension
+// function.
+func FormatString(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.ListType:
+ return formatList(arg, locale)
+ case types.MapType:
+ return formatMap(arg, locale)
+ case types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType:
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr, ok := argStrVal.Value().(string)
+ if !ok {
+ return "", fmt.Errorf("could not convert argument %q to string", argStrVal)
+ }
+ return argStr, nil
+ case types.NullType:
+ return "null", nil
+ default:
+ return "", fmt.Errorf("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", arg.Type().TypeName())
+ }
+}
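As a point of orientation, the exported FormatString helper above can also be invoked directly; the following is a minimal sketch assuming the upstream github.com/google/cel-go module layout (rather than this vendored copy), with the input converted through the default type adapter so the %s clause renders it as a CEL literal.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/ext"
)

func main() {
	// Render a dynamic list the same way the %s formatting clause would.
	val := types.DefaultTypeAdapter.NativeToValue([]any{1, "a", true})
	s, err := ext.FormatString(val, "en_US")
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // expected: [1, "a", true]
}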
+
+func formatDecimal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt, ok := arg.ConvertToType(types.IntType).Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ case types.UintType:
+ argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ default:
+ return "", fmt.Errorf("decimal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+func matchLanguage(locale string) (language.Tag, error) {
+ matcher, err := makeMatcher(locale)
+ if err != nil {
+ return language.Und, err
+ }
+ tag, _ := language.MatchStrings(matcher, locale)
+ return tag, nil
+}
+
+func makeMatcher(locale string) (language.Matcher, error) {
+ tags := make([]language.Tag, 0)
+ tag, err := language.Parse(locale)
+ if err != nil {
+ return nil, err
+ }
+ tags = append(tags, tag)
+ return language.NewMatcher(tags), nil
+}
+
+// quote implements a string quoting function. The string will be wrapped in
+// double quotes, and all valid CEL escape sequences will be escaped to show up
+// literally if printed. If the input contains any invalid UTF-8, the invalid runes
+// will be replaced with utf8.RuneError.
+func quote(s string) (string, error) {
+ var quotedStrBuilder strings.Builder
+ for _, c := range sanitize(s) {
+ switch c {
+ case '\a':
+ quotedStrBuilder.WriteString("\\a")
+ case '\b':
+ quotedStrBuilder.WriteString("\\b")
+ case '\f':
+ quotedStrBuilder.WriteString("\\f")
+ case '\n':
+ quotedStrBuilder.WriteString("\\n")
+ case '\r':
+ quotedStrBuilder.WriteString("\\r")
+ case '\t':
+ quotedStrBuilder.WriteString("\\t")
+ case '\v':
+ quotedStrBuilder.WriteString("\\v")
+ case '\\':
+ quotedStrBuilder.WriteString("\\\\")
+ case '"':
+ quotedStrBuilder.WriteString("\\\"")
+ default:
+ quotedStrBuilder.WriteRune(c)
+ }
+ }
+ escapedStr := quotedStrBuilder.String()
+ return "\"" + escapedStr + "\"", nil
+}
+
+// sanitize replaces all invalid runes in the given string with utf8.RuneError.
+func sanitize(s string) string {
+ var sanitizedStringBuilder strings.Builder
+ for _, r := range s {
+ if !utf8.ValidRune(r) {
+ sanitizedStringBuilder.WriteRune(utf8.RuneError)
+ } else {
+ sanitizedStringBuilder.WriteRune(r)
+ }
+ }
+ return sanitizedStringBuilder.String()
+}
+
+type stringFormatter struct{}
+
+func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) {
+ return FormatString(arg, locale)
+}
+
+func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) {
+ return formatDecimal(arg, locale)
+}
+
+func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("fixed-point clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", *precision)
+
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("scientific clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ fmtStr := fmt.Sprintf("%%%de", *precision)
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ // locale is intentionally unused as integers formatted as binary
+ // strings are locale-independent
+ return fmt.Sprintf("%b", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%b", argInt), nil
+ case types.BoolType:
+ argBool := arg.Value().(bool)
+ if argBool {
+ return "1", nil
+ }
+ return "0", nil
+ default:
+ return "", fmt.Errorf("only integers and bools can be formatted as binary, was given %s", arg.Type().TypeName())
+ }
+}
+
+func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ fmtStr := "%x"
+ if useUpper {
+ fmtStr = "%X"
+ }
+ switch arg.Type() {
+ case types.StringType, types.BytesType:
+ if arg.Type() == types.BytesType {
+ return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil
+ }
+ return fmt.Sprintf(fmtStr, arg.Value().(string)), nil
+ case types.IntType:
+ argInt, ok := arg.Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ case types.UintType:
+ argInt, ok := arg.Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ default:
+ return "", fmt.Errorf("only integers, byte buffers, and strings can be formatted as hex, was given %s", arg.Type().TypeName())
+ }
+ }
+}
+
+func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ return fmt.Sprintf("%o", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%o", argInt), nil
+ default:
+ return "", fmt.Errorf("octal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+type stringArgList struct {
+ args traits.Lister
+}
+
+func (c *stringArgList) Arg(index int64) (ref.Val, error) {
+ if index >= c.args.Size().Value().(int64) {
+ return nil, fmt.Errorf("index %d out of range", index)
+ }
+ return c.args.Get(types.Int(index)), nil
+}
+
+func (c *stringArgList) ArgSize() int64 {
+ return c.args.Size().Value().(int64)
+}
+
var (
stringListType = reflect.TypeOf([]string{})
)
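To show how the options documented above fit together, here is a hedged usage sketch (again assuming the upstream github.com/google/cel-go import paths): StringsVersion(1) enables format() and strings.quote(), and StringsLocale feeds the locale-aware fixed-point and scientific clauses.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// Configure the strings extension with a locale and a version cap of 1.
	env, err := cel.NewEnv(ext.Strings(ext.StringsLocale("en_US"), ext.StringsVersion(1)))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`strings.quote("two escape sequences \a\n")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err)
}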
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
index 04a3ec7441be..c6a620656bf6 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
@@ -11,10 +11,10 @@ go_library(
"activation.go",
"attribute_patterns.go",
"attributes.go",
- "coster.go",
"decorators.go",
"dispatcher.go",
"evalstate.go",
+ "formatting.go",
"interpretable.go",
"interpreter.go",
"optimizations.go",
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/activation.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/activation.go
index 8686d4f04f1e..f82e4e9038b7 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/activation.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -28,7 +28,7 @@ import (
type Activation interface {
// ResolveName returns a value from the activation by qualified name, or false if the name
// could not be found.
- ResolveName(name string) (interface{}, bool)
+ ResolveName(name string) (any, bool)
// Parent returns the parent of the current activation, may be nil.
// If non-nil, the parent will be searched during resolve calls.
@@ -43,23 +43,23 @@ func EmptyActivation() Activation {
// emptyActivation is a variable-free activation.
type emptyActivation struct{}
-func (emptyActivation) ResolveName(string) (interface{}, bool) { return nil, false }
-func (emptyActivation) Parent() Activation { return nil }
+func (emptyActivation) ResolveName(string) (any, bool) { return nil, false }
+func (emptyActivation) Parent() Activation { return nil }
// NewActivation returns an activation based on a map-based binding where the map keys are
// expected to be qualified names used with ResolveName calls.
//
-// The input `bindings` may either be of type `Activation` or `map[string]interface{}`.
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
//
// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
+// - func() any
// - func() ref.Val
//
// The output of the lazy binding will overwrite the variable reference in the internal map.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
-func NewActivation(bindings interface{}) (Activation, error) {
+func NewActivation(bindings any) (Activation, error) {
if bindings == nil {
return nil, errors.New("bindings must be non-nil")
}
@@ -67,7 +67,7 @@ func NewActivation(bindings interface{}) (Activation, error) {
if isActivation {
return a, nil
}
- m, isMap := bindings.(map[string]interface{})
+ m, isMap := bindings.(map[string]any)
if !isMap {
return nil, fmt.Errorf(
"activation input must be an activation or map[string]interface: got %T",
@@ -81,7 +81,7 @@ func NewActivation(bindings interface{}) (Activation, error) {
// Named bindings may lazily supply values by providing a function which accepts no arguments and
// produces an interface value.
type mapActivation struct {
- bindings map[string]interface{}
+ bindings map[string]any
}
// Parent implements the Activation interface method.
@@ -90,7 +90,7 @@ func (a *mapActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (a *mapActivation) ResolveName(name string) (interface{}, bool) {
+func (a *mapActivation) ResolveName(name string) (any, bool) {
obj, found := a.bindings[name]
if !found {
return nil, false
@@ -100,7 +100,7 @@ func (a *mapActivation) ResolveName(name string) (interface{}, bool) {
obj = fn()
a.bindings[name] = obj
}
- fnRaw, isLazy := obj.(func() interface{})
+ fnRaw, isLazy := obj.(func() any)
if isLazy {
obj = fnRaw()
a.bindings[name] = obj
@@ -121,7 +121,7 @@ func (a *hierarchicalActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (a *hierarchicalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *hierarchicalActivation) ResolveName(name string) (any, bool) {
if object, found := a.child.ResolveName(name); found {
return object, found
}
@@ -138,8 +138,8 @@ func NewHierarchicalActivation(parent Activation, child Activation) Activation {
// representing field and index operations that should result in a 'types.Unknown' result.
//
// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
-// but is typically either an existing Activation or map[string]interface{}.
-func NewPartialActivation(bindings interface{},
+// but is typically either an existing Activation or map[string]any.
+func NewPartialActivation(bindings any,
unknowns ...*AttributePattern) (PartialActivation, error) {
a, err := NewActivation(bindings)
if err != nil {
@@ -184,7 +184,7 @@ func (v *varActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (v *varActivation) ResolveName(name string) (interface{}, bool) {
+func (v *varActivation) ResolveName(name string) (any, bool) {
if name == v.name {
return v.val, true
}
@@ -194,7 +194,7 @@ func (v *varActivation) ResolveName(name string) (interface{}, bool) {
var (
// pool of var activations to reduce allocations during folds.
varActivationPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &varActivation{}
},
}
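A brief sketch of the map-based Activation and lazy bindings described above (assuming the upstream module path): a func() any binding is evaluated on the first ResolveName call and its result overwrites the map entry.

package main

import (
	"fmt"

	"github.com/google/cel-go/interpreter"
)

func main() {
	vars, err := interpreter.NewActivation(map[string]any{
		"name": "cluster-autoscaler",
		// Lazy binding: resolved on first access, then cached in the map.
		"replicas": func() any { return int64(3) },
	})
	if err != nil {
		panic(err)
	}
	v, found := vars.ResolveName("replicas")
	fmt.Println(v, found) // 3 true
}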
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
index b33f7f7fd90f..afb7c8d5bf38 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -15,8 +15,6 @@
package interpreter
import (
- "fmt"
-
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
@@ -36,9 +34,9 @@ import (
//
// Examples:
//
-// 1. ns.myvar["complex-value"]
-// 2. ns.myvar["complex-value"][0]
-// 3. ns.myvar["complex-value"].*.name
+// 1. ns.myvar["complex-value"]
+// 2. ns.myvar["complex-value"][0]
+// 3. ns.myvar["complex-value"].*.name
//
// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
// field access on 'complex-value'. The second example expands the match to indicate that only
@@ -108,7 +106,7 @@ func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
type AttributeQualifierPattern struct {
wildcard bool
- value interface{}
+ value any
}
// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
@@ -134,44 +132,44 @@ func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
type qualifierValueEquator interface {
// QualifierValueEquals returns true if the input value is equal to the value held in the
// Qualifier.
- QualifierValueEquals(value interface{}) bool
+ QualifierValueEquals(value any) bool
}
// QualifierValueEquals implementation for boolean qualifiers.
-func (q *boolQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
bval, ok := value.(bool)
return ok && q.value == bval
}
// QualifierValueEquals implementation for field qualifiers.
-func (q *fieldQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
sval, ok := value.(string)
return ok && q.Name == sval
}
// QualifierValueEquals implementation for string qualifiers.
-func (q *stringQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
sval, ok := value.(string)
return ok && q.value == sval
}
// QualifierValueEquals implementation for int qualifiers.
-func (q *intQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *intQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// QualifierValueEquals implementation for uint qualifiers.
-func (q *uintQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// QualifierValueEquals implementation for double qualifiers.
-func (q *doubleQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// numericValueEquals uses CEL equality to determine whether two number values are
-func numericValueEquals(value interface{}, celValue ref.Val) bool {
+func numericValueEquals(value any, celValue ref.Val) bool {
val := types.DefaultTypeAdapter.NativeToValue(value)
return celValue.Equal(val) == types.True
}
@@ -272,13 +270,9 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns(
if err != nil {
return nil, err
}
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
// If this resolution behavior ever changes, new implementations of the
// qualifierValueEquator may be required to handle proper resolution.
- qual, err = fac.NewQualifier(nil, qual.ID(), val)
+ qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional())
if err != nil {
return nil, err
}
@@ -338,24 +332,10 @@ func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) {
return m, nil
}
-// Resolve is an implementation of the Attribute interface method which uses the
-// attributeMatcher TryResolve implementation rather than the embedded NamespacedAttribute
-// Resolve implementation.
-func (m *attributeMatcher) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := m.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, fmt.Errorf("no such attribute: %v", m.NamespacedAttribute)
- }
- return obj, nil
-}
-
-// TryResolve is an implementation of the NamespacedAttribute interface method which tests
+// Resolve is an implementation of the NamespacedAttribute interface method which tests
// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise,
// the standard Resolve logic applies.
-func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error) {
+func (m *attributeMatcher) Resolve(vars Activation) (any, error) {
id := m.NamespacedAttribute.ID()
// Bug in how partial activation is resolved, should search parents as well.
partial, isPartial := toPartialActivation(vars)
@@ -366,30 +346,23 @@ func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error
m.CandidateVariableNames(),
m.qualifiers)
if err != nil {
- return nil, true, err
+ return nil, err
}
if unk != nil {
- return unk, true, nil
+ return unk, nil
}
}
- return m.NamespacedAttribute.TryResolve(vars)
+ return m.NamespacedAttribute.Resolve(vars)
}
// Qualify is an implementation of the Qualifier interface method.
-func (m *attributeMatcher) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := m.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := m.fac.NewQualifier(nil, m.ID(), val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(m.fac, vars, obj, m)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
}
func toPartialActivation(vars Activation) (PartialActivation, bool) {
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go
index 4f1772ea393b..5c8107ab7cbe 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -16,7 +16,6 @@ package interpreter
import (
"fmt"
- "math"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/types"
@@ -61,7 +60,7 @@ type AttributeFactory interface {
// The qualifier may consider the object type being qualified, if present. If absent, the
// qualification should be considered dynamic and the qualification should still work, though
// it may be sub-optimal.
- NewQualifier(objType *exprpb.Type, qualID int64, val interface{}) (Qualifier, error)
+ NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error)
}
// Qualifier marker interface for designating different qualifier values and where they appear
@@ -70,9 +69,21 @@ type Qualifier interface {
// ID where the qualifier appears within an expression.
ID() int64
+ // IsOptional specifies whether the qualifier is optional.
+ // Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent
+ // rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if
+ // the object to qualify is itself optional.
+ IsOptional() bool
+
// Qualify performs a qualification, e.g. field selection, on the input object and returns
- // the value or error that results.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
+ // the value of the access and whether the value was set. A non-nil value with a false presence
+ // test result indicates that the value being returned is the default value.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
}
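To see the optional-qualification path (IsOptional/QualifyIfPresent) exercised end to end, a hypothetical sketch via the public cel API follows, assuming the upstream module and that optional syntax is enabled with cel.OptionalTypes().

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		panic(err)
	}
	// m.?missing resolves through QualifyIfPresent, producing an optional value
	// instead of a missing-key error.
	ast, iss := env.Compile(`m.?missing.orValue("fallback")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{}})
	fmt.Println(out, err) // expected: fallback
}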
// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
@@ -82,6 +93,7 @@ type Qualifier interface {
type ConstantQualifier interface {
Qualifier
+ // Value returns the constant value associated with the qualifier.
Value() ref.Val
}
@@ -90,12 +102,16 @@ type ConstantQualifier interface {
type Attribute interface {
Qualifier
- // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid
- // qualifier type.
+ // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid qualifier type.
AddQualifier(Qualifier) (Attribute, error)
- // Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
+ // Resolve returns the value of the Attribute and whether it was present given an Activation.
+ // For objects which support safe traversal, the value may be non-nil and the presence flag be false.
+ //
+ // If an error is encountered during attribute resolution, it will be returned immediately.
+ // If the attribute cannot be resolved within the Activation, the result must be: `nil`, `error`
+ // with the error indicating which variable was missing.
+ Resolve(Activation) (any, error)
}
// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
@@ -107,22 +123,14 @@ type NamespacedAttribute interface {
// the CEL namespace resolution order.
CandidateVariableNames() []string
- // Qualifiers returns the list of qualifiers associated with the Attribute.s
+ // Qualifiers returns the list of qualifiers associated with the Attribute.
Qualifiers() []Qualifier
-
- // TryResolve attempts to return the value of the attribute given the current Activation.
- // If an error is encountered during attribute resolution, it will be returned immediately.
- // If the attribute cannot be resolved within the Activation, the result must be: `nil`,
- // `false`, `nil`.
- TryResolve(Activation) (interface{}, bool, error)
}
// NewAttributeFactory returns a default AttributeFactory which is produces Attribute values
// capable of resolving types by simple names and qualify the values using the supported qualifier
// types: bool, int, string, and uint.
-func NewAttributeFactory(cont *containers.Container,
- a ref.TypeAdapter,
- p ref.TypeProvider) AttributeFactory {
+func NewAttributeFactory(cont *containers.Container, a ref.TypeAdapter, p ref.TypeProvider) AttributeFactory {
return &attrFactory{
container: cont,
adapter: a,
@@ -190,9 +198,7 @@ func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribu
}
// NewQualifier is an implementation of the AttributeFactory interface.
-func (r *attrFactory) NewQualifier(objType *exprpb.Type,
- qualID int64,
- val interface{}) (Qualifier, error) {
+func (r *attrFactory) NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) {
// Before creating a new qualifier check to see if this is a protobuf message field access.
// If so, use the precomputed GetFrom qualification method rather than the standard
// stringQualifier.
@@ -205,10 +211,11 @@ func (r *attrFactory) NewQualifier(objType *exprpb.Type,
Name: str,
FieldType: ft,
adapter: r.adapter,
+ optional: opt,
}, nil
}
}
- return newQualifier(r.adapter, qualID, val)
+ return newQualifier(r.adapter, qualID, val, opt)
}
type absoluteAttribute struct {
@@ -227,16 +234,11 @@ func (a *absoluteAttribute) ID() int64 {
return a.id
}
-// Cost implements the Coster interface method.
-func (a *absoluteAttribute) Cost() (min, max int64) {
- for _, q := range a.qualifiers {
- minQ, maxQ := estimateCost(q)
- min += minQ
- max += maxQ
- }
- min++ // For object retrieval.
- max++
- return
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *absoluteAttribute) IsOptional() bool {
+ return false
}
// AddQualifier implements the Attribute interface method.
@@ -256,33 +258,13 @@ func (a *absoluteAttribute) Qualifiers() []Qualifier {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *absoluteAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
}
-// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
-// variable is not found, or if its Qualifiers cannot be applied successfully.
-func (a *absoluteAttribute) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := a.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if found {
- return obj, nil
- }
- return nil, fmt.Errorf("no such attribute: %v", a)
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// String implements the Stringer interface method.
@@ -290,36 +272,36 @@ func (a *absoluteAttribute) String() string {
return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
}
-// TryResolve iterates through the namespaced variable names until one is found within the
-// Activation or TypeProvider.
+// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
+// variable is not found, or if its Qualifiers cannot be applied successfully.
//
// If the variable name cannot be found as an Activation variable or in the TypeProvider as
-// a type, then the result is `nil`, `false`, `nil` per the interface requirement.
-func (a *absoluteAttribute) TryResolve(vars Activation) (interface{}, bool, error) {
+// a type, then the result is `nil`, `error` with the error indicating the name of the first
+// variable searched as missing.
+func (a *absoluteAttribute) Resolve(vars Activation) (any, error) {
for _, nm := range a.namespaceNames {
// If the variable is found, process it. Otherwise, wait until the checks to
// determine whether the type is unknown before returning.
- op, found := vars.ResolveName(nm)
+ obj, found := vars.ResolveName(nm)
if found {
- var err error
- for _, qual := range a.qualifiers {
- op, err = qual.Qualify(vars, op)
- if err != nil {
- return nil, true, err
- }
+ obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers)
+ if err != nil {
+ return nil, err
}
- return op, true, nil
+ if isOpt {
+ return types.OptionalOf(a.adapter.NativeToValue(obj)), nil
+ }
+ return obj, nil
}
// Attempt to resolve the qualified type name if the name is not a variable identifier.
typ, found := a.provider.FindIdent(nm)
if found {
if len(a.qualifiers) == 0 {
- return typ, true, nil
+ return typ, nil
}
- return nil, true, fmt.Errorf("no such attribute: %v", typ)
}
}
- return nil, false, nil
+ return nil, missingAttribute(a.String())
}
type conditionalAttribute struct {
@@ -336,14 +318,11 @@ func (a *conditionalAttribute) ID() int64 {
return a.id
}
-// Cost provides the heuristic cost of a ternary operation <expr> ? <t> : <f>.
-// The cost is computed as cost(expr) plus the min/max costs of evaluating either
-// `t` or `f`.
-func (a *conditionalAttribute) Cost() (min, max int64) {
- tMin, tMax := estimateCost(a.truthy)
- fMin, fMax := estimateCost(a.falsy)
- eMin, eMax := estimateCost(a.expr)
- return eMin + findMin(tMin, fMin), eMax + findMax(tMax, fMax)
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *conditionalAttribute) IsOptional() bool {
+ return false
}
// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
@@ -361,28 +340,18 @@ func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *conditionalAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
-func (a *conditionalAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *conditionalAttribute) Resolve(vars Activation) (any, error) {
val := a.expr.Eval(vars)
- if types.IsError(val) {
- return nil, val.(*types.Err)
- }
if val == types.True {
return a.truthy.Resolve(vars)
}
@@ -413,30 +382,11 @@ func (a *maybeAttribute) ID() int64 {
return a.id
}
-// Cost implements the Coster interface method. The min cost is computed as the minimal cost among
-// all the possible attributes, the max cost ditto.
-func (a *maybeAttribute) Cost() (min, max int64) {
- min, max = math.MaxInt64, 0
- for _, a := range a.attrs {
- minA, maxA := estimateCost(a)
- min = findMin(min, minA)
- max = findMax(max, maxA)
- }
- return
-}
-
-func findMin(x, y int64) int64 {
- if x < y {
- return x
- }
- return y
-}
-
-func findMax(x, y int64) int64 {
- if x > y {
- return x
- }
- return y
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+ return false
}
// AddQualifier adds a qualifier to each possible attribute variant, and also creates
@@ -446,21 +396,21 @@ func findMax(x, y int64) int64 {
//
// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
//
-// mb = MaybeAttribute(<id>, "a")
+// mb = MaybeAttribute(<id>, "a")
//
-// Initializing the maybe attribute creates an absolute attribute internally which includes the
-// possible namespaced names of the attribute. In this example, let's assume we are in namespace
-// 'ns', then the maybe is either one of the following variable names:
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns', then the maybe is either one of the following variable names:
//
-// possible variables names -- ns.a, a
+// possible variables names -- ns.a, a
//
// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
-// name, or a field selection on one of the possible variable names produced earlier:
+// name, or a field selection on one of the possible variable names produced earlier:
//
-// mb.AddQualifier("b")
+// mb.AddQualifier("b")
//
-// possible variables names -- ns.a.b, a.b
-// possible field selection -- ns.a['b'], a['b']
+// possible variables names -- ns.a.b, a.b
+// possible field selection -- ns.a['b'], a['b']
//
// If none of the attributes within the maybe resolves a value, the result is an error.
func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
@@ -491,38 +441,42 @@ func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *maybeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve follows the variable resolution rules to determine whether the attribute is a variable
// or a field selection.
-func (a *maybeAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+ var maybeErr error
for _, attr := range a.attrs {
- obj, found, err := attr.TryResolve(vars)
+ obj, err := attr.Resolve(vars)
// Return an error if one is encountered.
if err != nil {
- return nil, err
- }
- // If the object was found, return it.
- if found {
- return obj, nil
+ resErr, ok := err.(*resolutionError)
+ if !ok {
+ return nil, err
+ }
+ // If this was not a missing variable error, return it.
+ if !resErr.isMissingAttribute() {
+ return nil, err
+ }
+ // When the variable is missing in a maybe attribute we defer erroring.
+ if maybeErr == nil {
+ maybeErr = resErr
+ }
+ // Continue attempting to resolve possible variables.
+ continue
}
+ return obj, nil
}
// Else, produce a no such attribute error.
- return nil, fmt.Errorf("no such attribute: %v", a)
+ return nil, maybeErr
}
// String is an implementation of the Stringer interface method.
@@ -543,15 +497,11 @@ func (a *relativeAttribute) ID() int64 {
return a.id
}
-// Cost implements the Coster interface method.
-func (a *relativeAttribute) Cost() (min, max int64) {
- min, max = estimateCost(a.operand)
- for _, qual := range a.qualifiers {
- minQ, maxQ := estimateCost(qual)
- min += minQ
- max += maxQ
- }
- return
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *relativeAttribute) IsOptional() bool {
+ return false
}
// AddQualifier implements the Attribute interface method.
@@ -561,24 +511,17 @@ func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *relativeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve expression value and qualifier relative to the expression result.
-func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *relativeAttribute) Resolve(vars Activation) (any, error) {
// First, evaluate the operand.
v := a.operand.Eval(vars)
if types.IsError(v) {
@@ -587,14 +530,12 @@ func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) {
if types.IsUnknown(v) {
return v, nil
}
- // Next, qualify it. Qualification handles unknowns as well, so there's no need to recheck.
- var err error
- var obj interface{} = v
- for _, qual := range a.qualifiers {
- obj, err = qual.Qualify(vars, obj)
- if err != nil {
- return nil, err
- }
+ obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ return types.OptionalOf(a.adapter.NativeToValue(obj)), nil
}
return obj, nil
}
@@ -604,42 +545,93 @@ func (a *relativeAttribute) String() string {
return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
}
-func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier, error) {
+func newQualifier(adapter ref.TypeAdapter, id int64, v any, opt bool) (Qualifier, error) {
var qual Qualifier
switch val := v.(type) {
case Attribute:
- return &attrQualifier{id: id, Attribute: val}, nil
+ // Note, attributes are initially identified as non-optional since they represent a top-level
+ // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier
+ // is created which intercepts the IsOptional check for the attribute in order to return the
+ // correct result.
+ return &attrQualifier{
+ id: id,
+ Attribute: val,
+ optional: opt,
+ }, nil
case string:
- qual = &stringQualifier{id: id, value: val, celValue: types.String(val), adapter: adapter}
+ qual = &stringQualifier{
+ id: id,
+ value: val,
+ celValue: types.String(val),
+ adapter: adapter,
+ optional: opt,
+ }
case int:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case int32:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case int64:
- qual = &intQualifier{id: id, value: val, celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case uint32:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case uint64:
- qual = &uintQualifier{id: id, value: val, celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case bool:
- qual = &boolQualifier{id: id, value: val, celValue: types.Bool(val), adapter: adapter}
+ qual = &boolQualifier{
+ id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt,
+ }
case float32:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: types.Double(val), adapter: adapter}
+ qual = &doubleQualifier{
+ id: id,
+ value: float64(val),
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ }
case float64:
- qual = &doubleQualifier{id: id, value: val, celValue: types.Double(val), adapter: adapter}
+ qual = &doubleQualifier{
+ id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt,
+ }
case types.String:
- qual = &stringQualifier{id: id, value: string(val), celValue: val, adapter: adapter}
+ qual = &stringQualifier{
+ id: id, value: string(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Int:
- qual = &intQualifier{id: id, value: int64(val), celValue: val, adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: val, adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Bool:
- qual = &boolQualifier{id: id, value: bool(val), celValue: val, adapter: adapter}
+ qual = &boolQualifier{
+ id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Double:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: val, adapter: adapter}
+ qual = &doubleQualifier{
+ id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Unknown:
+ qual = &unknownQualifier{id: id, value: val}
default:
+ if q, ok := v.(Qualifier); ok {
+ return q, nil
+ }
return nil, fmt.Errorf("invalid qualifier type: %T", v)
}
return qual, nil
@@ -648,15 +640,18 @@ func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier,
type attrQualifier struct {
id int64
Attribute
+ optional bool
}
+// ID implements the Qualifier interface method and returns the qualification instruction id
+// rather than the attribute id.
func (q *attrQualifier) ID() int64 {
return q.id
}
-// Cost returns zero for constant field qualifiers
-func (q *attrQualifier) Cost() (min, max int64) {
- return estimateCost(q.Attribute)
+// IsOptional implements the Qualifier interface method.
+func (q *attrQualifier) IsOptional() bool {
+ return q.optional
}
type stringQualifier struct {
@@ -664,6 +659,7 @@ type stringQualifier struct {
value string
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -671,58 +667,87 @@ func (q *stringQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *stringQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *stringQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
s := q.value
- isMap := false
- isKey := false
switch o := obj.(type) {
- case map[string]interface{}:
- isMap = true
- obj, isKey = o[s]
+ case map[string]any:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]string:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]float32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]float64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]bool:
- isMap = true
- obj, isKey = o[s]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", s)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -730,16 +755,12 @@ func (q *stringQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *stringQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type intQualifier struct {
id int64
value int64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -747,97 +768,113 @@ func (q *intQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *intQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *intQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
i := q.value
- isMap := false
- isKey := false
- isIndex := false
+ var isMap bool
switch o := obj.(type) {
// The specialized map types supported by an int qualifier are considerably fewer than the set
// of specialized map types supported by string qualifiers since they are less frequently used
// than string-based map keys. Additional specializations may be added in the future if
// desired.
- case map[int]interface{}:
+ case map[int]any:
isMap = true
- obj, isKey = o[int(i)]
- case map[int32]interface{}:
+ obj, isKey := o[int(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int32]any:
isMap = true
- obj, isKey = o[int32(i)]
- case map[int64]interface{}:
+ obj, isKey := o[int32(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int64]any:
isMap = true
- obj, isKey = o[i]
- case []interface{}:
- isIndex = i >= 0 && i < int64(len(o))
+ obj, isKey := o[i]
+ if isKey {
+ return obj, true, nil
+ }
+ case []any:
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []string:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []float32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []float64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []bool:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
- case types.Unknown:
- return o, nil
default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", i)
+ if presenceTest {
+ return nil, false, nil
}
- if !isMap && !isIndex {
- return nil, fmt.Errorf("index out of bounds: %v", i)
+ if isMap {
+ return nil, false, missingKey(q.celValue)
}
- return obj, nil
+ return nil, false, missingIndex(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -845,16 +882,12 @@ func (q *intQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *intQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type uintQualifier struct {
id int64
value uint64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -862,38 +895,51 @@ func (q *uintQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *uintQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *uintQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
u := q.value
- isMap := false
- isKey := false
switch o := obj.(type) {
// The specialized map types supported by a uint qualifier are considerably fewer than the set
// of specialized map types supported by string qualifiers since they are less frequently used
// than string-based map keys. Additional specializations may be added in the future if
// desired.
- case map[uint]interface{}:
- isMap = true
- obj, isKey = o[uint(u)]
- case map[uint32]interface{}:
- isMap = true
- obj, isKey = o[uint32(u)]
- case map[uint64]interface{}:
- isMap = true
- obj, isKey = o[u]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ case map[uint]any:
+ obj, isKey := o[uint(u)]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ case map[uint32]any:
+ obj, isKey := o[uint32(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint64]any:
+ obj, isKey := o[u]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", u)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -901,16 +947,12 @@ func (q *uintQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *uintQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type boolQualifier struct {
id int64
value bool
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -918,30 +960,37 @@ func (q *boolQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *boolQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *boolQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
b := q.value
- isKey := false
switch o := obj.(type) {
- // The specialized map types supported by a bool qualifier are considerably fewer than the set
- // of specialized map types supported by string qualifiers since they are less frequently used
- // than string-based map keys. Additional specializations may be added in the future if
- // desired.
- case map[bool]interface{}:
- obj, isKey = o[b]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ case map[bool]any:
+ obj, isKey := o[b]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if !isKey {
- return nil, fmt.Errorf("no such key: %v", b)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -949,11 +998,6 @@ func (q *boolQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *boolQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
// fieldQualifier indicates that the qualification is a well-defined field with a known
// field type. When the field type is known this can be used to improve the speed and
// efficiency of field resolution.
@@ -962,6 +1006,7 @@ type fieldQualifier struct {
Name string
FieldType *ref.FieldType
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -969,12 +1014,39 @@ func (q *fieldQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *fieldQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *fieldQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, err
+ }
+ return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
if rv, ok := obj.(ref.Val); ok {
obj = rv.Value()
}
- return q.FieldType.GetFrom(obj)
+ if !q.FieldType.IsSet(obj) {
+ return nil, false, nil
+ }
+ if presenceOnly {
+ return nil, true, nil
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, false, err
+ }
+ return val, true, nil
}
// Value implements the ConstantQualifier interface
@@ -982,11 +1054,6 @@ func (q *fieldQualifier) Value() ref.Val {
return types.String(q.Name)
}
-// Cost returns zero for constant field qualifiers
-func (q *fieldQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
// doubleQualifier qualifies a CEL object, map, or list using a double value.
//
// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
@@ -997,6 +1064,7 @@ type doubleQualifier struct {
value float64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -1004,48 +1072,242 @@ func (q *doubleQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *doubleQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- switch o := obj.(type) {
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+ id int64
+ value types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+ return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+ return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+ return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+ optObj, isOpt := obj.(*types.Optional)
+ if isOpt {
+ if !optObj.HasValue() {
+ return optObj, false, nil
+ }
+ obj = optObj.GetValue().Value()
+ }
+
+ var err error
+ for _, qual := range qualifiers {
+ var qualObj any
+ isOpt = isOpt || qual.IsOptional()
+ if isOpt {
+ var present bool
+ qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+ if err != nil {
+ return nil, false, err
+ }
+ if !present {
+ return types.OptionalNone, false, nil
+ }
+ } else {
+ qualObj, err = qual.Qualify(vars, obj)
+ if err != nil {
+ return nil, false, err
+ }
}
- return elem, nil
+ obj = qualObj
}
+ return obj, isOpt, nil
}
-// refResolve attempts to convert the value to a CEL value and then uses reflection methods
-// to try and resolve the qualifier.
-func refResolve(adapter ref.TypeAdapter, idx ref.Val, obj interface{}) (ref.Val, error) {
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ return qual.Qualify(vars, obj)
+}
+
+// attrQualifyIfPresent conditionally performs the qualification when the value resolved from the
+// attribute is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+ presenceOnly bool) (any, bool, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, false, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, false, err
+ }
+ return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and
+// apply the qualifier with the option to presence test field accesses before retrieving field values.
+func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) {
celVal := adapter.NativeToValue(obj)
- mapper, isMapper := celVal.(traits.Mapper)
- if isMapper {
- elem, found := mapper.Find(idx)
- if !found {
- return nil, fmt.Errorf("no such key: %v", idx)
+ switch v := celVal.(type) {
+ case types.Unknown:
+ return v, true, nil
+ case *types.Err:
+ return nil, false, v
+ case traits.Mapper:
+ val, found := v.Find(idx)
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
}
- return elem, nil
- }
- indexer, isIndexer := celVal.(traits.Indexer)
- if isIndexer {
- elem := indexer.Get(idx)
- if types.IsError(elem) {
- return nil, elem.(*types.Err)
+ if found {
+ return val, true, nil
}
- return elem, nil
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ case traits.Lister:
+ i, err := types.IndexOrError(idx)
+ if err != nil {
+ return nil, false, err
+ }
+ celIndex := types.Int(i)
+ if i >= 0 && celIndex < v.Size().(types.Int) {
+ return v.Get(idx), true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingIndex(idx)
+ case traits.Indexer:
+ if presenceTest {
+ ft, ok := v.(traits.FieldTester)
+ if ok {
+ presence := ft.IsSet(idx)
+ if types.IsError(presence) {
+ return nil, false, presence.(*types.Err)
+ }
+ if presenceOnly || presence == types.False {
+ return nil, presence == types.True, nil
+ }
+ }
+ }
+ val := v.Get(idx)
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ return val, true, nil
+ default:
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ }
+}
+
+// resolutionError is a custom error type which encodes the different error states which may
+// occur during attribute resolution.
+type resolutionError struct {
+ missingAttribute string
+ missingIndex ref.Val
+ missingKey ref.Val
+}
+
+func (e *resolutionError) isMissingAttribute() bool {
+ return e.missingAttribute != ""
+}
+
+func missingIndex(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingIndex: missing,
+ }
+}
+
+func missingKey(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingKey: missing,
+ }
+}
+
+func missingAttribute(attr string) *resolutionError {
+ return &resolutionError{
+ missingAttribute: attr,
}
- if types.IsUnknown(celVal) {
- return celVal, nil
+}
+
+// Error implements the error interface method.
+func (e *resolutionError) Error() string {
+ if e.missingKey != nil {
+ return fmt.Sprintf("no such key: %v", e.missingKey)
+ }
+ if e.missingIndex != nil {
+ return fmt.Sprintf("index out of bounds: %v", e.missingIndex)
}
- // TODO: If the types.Err value contains more than just an error message at some point in the
- // future, then it would be reasonable to return error values as ref.Val types rather than
- // simple go error types.
- if types.IsError(celVal) {
- return nil, celVal.(*types.Err)
+ if e.missingAttribute != "" {
+ return fmt.Sprintf("no such attribute: %s", e.missingAttribute)
}
- return nil, fmt.Errorf("no such key: %v", idx)
+ return "invalid attribute"
+}
+
+// Is implements the errors.Is() method used by more recent versions of Go.
+func (e *resolutionError) Is(err error) bool {
+ return err.Error() == e.Error()
+}
+
+func findMin(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func findMax(x, y int64) int64 {
+ if x > y {
+ return x
+ }
+ return y
}
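The attribute changes above replace the per-qualifier Cost methods with optional-aware qualification: QualifyIfPresent presence-tests a key, index, or field before retrieving it, and a plain Qualify on a missing key surfaces a resolutionError that cooperates with errors.Is. A minimal standalone sketch of that contract, using simplified stand-ins (a string-keyed map and a string-valued resolutionError) rather than the real cel-go types:

package main

import (
	"errors"
	"fmt"
)

// resolutionError is a simplified stand-in for the vendored error type: it records
// which key was missing and compares by message via Is.
type resolutionError struct{ missingKey string }

func (e *resolutionError) Error() string   { return fmt.Sprintf("no such key: %v", e.missingKey) }
func (e *resolutionError) Is(err error) bool { return err.Error() == e.Error() }

// qualifyIfPresent mirrors the QualifyIfPresent contract: (value, present, error),
// with no error when the key is simply absent.
func qualifyIfPresent(obj map[string]any, key string) (any, bool, error) {
	v, ok := obj[key]
	if !ok {
		return nil, false, nil
	}
	return v, true, nil
}

// qualify mirrors Qualify: an absent key is an error, not a silent miss.
func qualify(obj map[string]any, key string) (any, error) {
	v, present, err := qualifyIfPresent(obj, key)
	if err != nil || present {
		return v, err
	}
	return nil, &resolutionError{missingKey: key}
}

func main() {
	obj := map[string]any{"a": 1}
	_, err := qualify(obj, "b")
	fmt.Println(errors.Is(err, &resolutionError{missingKey: "b"})) // true
}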
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/decorators.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/decorators.go
index bdbbad43e256..208487b7d3b0 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/decorators.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/decorators.go
@@ -29,7 +29,7 @@ type InterpretableDecorator func(Interpretable) (Interpretable, error)
func decObserveEval(observer EvalObserver) InterpretableDecorator {
return func(i Interpretable) (Interpretable, error) {
switch inst := i.(type) {
- case *evalWatch, *evalWatchAttr, *evalWatchConst:
+ case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
// these instruction are already watching, return straight-away.
return i, nil
case InterpretableAttribute:
@@ -42,6 +42,11 @@ func decObserveEval(observer EvalObserver) InterpretableDecorator {
InterpretableConst: inst,
observer: observer,
}, nil
+ case InterpretableConstructor:
+ return &evalWatchConstructor{
+ constructor: inst,
+ observer: observer,
+ }, nil
default:
return &evalWatch{
Interpretable: i,
@@ -224,8 +229,8 @@ func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Inte
valueSet := make(map[ref.Val]ref.Val)
for it.HasNext() == types.True {
elem := it.Next()
- if !types.IsPrimitiveType(elem) {
- // Note, non-primitive type are not yet supported.
+ if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType {
+ // Note, non-primitive types are not yet supported, and []byte isn't hashable.
return i, nil
}
valueSet[elem] = types.True
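The guard added above excludes bytes values from the in-list set-membership optimization because a []byte-backed value cannot be used as a Go map key. A tiny sketch of why, using a local named slice type as a stand-in for cel-go's types.Bytes:

package main

import "fmt"

// Bytes mirrors a []byte-backed value type, similar in shape to cel-go's types.Bytes.
type Bytes []byte

func main() {
	set := map[any]bool{}
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic:", r) // runtime error: hash of unhashable type main.Bytes
		}
	}()
	set[Bytes("abc")] = true // slices are not hashable, so this panics at runtime
	fmt.Println("unreachable")
}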
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/formatting.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/formatting.go
new file mode 100644
index 000000000000..6a98f6fa564d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/formatting.go
@@ -0,0 +1,383 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type typeVerifier func(int64, ...*types.TypeValue) (bool, error)
+
+// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports
+// any errors at compile time.
+func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator {
+ return func(inter Interpretable) (Interpretable, error) {
+ call, ok := inter.(InterpretableCall)
+ if !ok {
+ return inter, nil
+ }
+ if call.OverloadID() != "string_format" {
+ return inter, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args))
+ }
+ fmtStrInter, ok := args[0].(InterpretableConst)
+ if !ok {
+ return inter, nil
+ }
+ var fmtArgsInter InterpretableConstructor
+ fmtArgsInter, ok = args[1].(InterpretableConstructor)
+ if !ok {
+ return inter, nil
+ }
+ if fmtArgsInter.Type() != types.ListType {
+ // don't necessarily return an error since the list may be DynType
+ return inter, nil
+ }
+ formatStr := fmtStrInter.Value().Value().(string)
+ initVals := fmtArgsInter.InitVals()
+
+ formatCheck := &formatCheck{
+ args: initVals,
+ verifier: verifier,
+ }
+ // use a placeholder locale, since locale doesn't affect syntax
+ _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US")
+ if err != nil {
+ return nil, err
+ }
+ seenArgs := formatCheck.argsRequested
+ if len(initVals) > seenArgs {
+ return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals))
+ }
+ return inter, nil
+ }
+}
+
+type formatCheck struct {
+ args []Interpretable
+ argsRequested int
+ curArgIndex int64
+ enableCheckArgTypes bool
+ verifier typeVerifier
+}
+
+func (c *formatCheck) String(arg ref.Val, locale string) (string, error) {
+ valid, err := verifyString(c.args[c.curArgIndex], c.verifier)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("integer clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("fixed-point clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("scientific clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers and bools can be formatted as binary")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers, byte buffers, and strings can be formatted as hex")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("octal clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Arg(index int64) (ref.Val, error) {
+ c.argsRequested++
+ c.curArgIndex = index
+ // return a dummy value - this is immediately passed back to us
+ // through one of the FormatStringInterpolator callbacks, so anything will do
+ return types.Int(0), nil
+}
+
+func (c *formatCheck) ArgSize() int64 {
+ return int64(len(c.args))
+}
+
+func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) {
+ subVerified, err := verifier(sub.ID(),
+ types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType)
+ if err != nil {
+ return false, err
+ }
+ if !subVerified {
+ return false, nil
+ }
+ con, ok := sub.(InterpretableConstructor)
+ if ok {
+ members := con.InitVals()
+ for _, m := range members {
+ // recursively verify if we're dealing with a list/map
+ verified, err := verifyString(m, verifier)
+ if err != nil {
+ return false, err
+ }
+ if !verified {
+ return false, nil
+ }
+ }
+ }
+ return true, nil
+
+}
+
+// FormatStringInterpolator is an interface that allows user-defined behavior
+// for formatting clause implementations, as well as argument retrieval.
+// Each function is expected to support the appropriate types as laid out in
+// the string.format documentation, and to return an error if given an inappropriate type.
+type FormatStringInterpolator interface {
+ // String takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a string, or an error if one occurred.
+ String(ref.Val, string) (string, error)
+
+ // Decimal takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a decimal integer, or an error if one occurred.
+ Decimal(ref.Val, string) (string, error)
+
+ // Fixed takes an int pointer representing precision (or nil if none was given) and
+ // returns a function operating in a similar manner to String and Decimal, taking a
+ // ref.Val and locale and returning the appropriate string. A closure is returned
+ // so precision can be set without needing an additional function call/configuration.
+ Fixed(*int) func(ref.Val, string) (string, error)
+
+ // Scientific functions identically to Fixed, except the string returned from the closure
+ // is expected to be in scientific notation.
+ Scientific(*int) func(ref.Val, string) (string, error)
+
+ // Binary takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a binary integer, or an error if one occurred.
+ Binary(ref.Val, string) (string, error)
+
+ // Hex takes a boolean that, if true, indicates the hex string output by the returned
+ // closure should use uppercase letters for A-F.
+ Hex(bool) func(ref.Val, string) (string, error)
+
+ // Octal takes a ref.Val and a string representing the current locale identifier and
+ // returns the Val formatted in octal, or an error if one occurred.
+ Octal(ref.Val, string) (string, error)
+}
+
+// FormatList is an interface that allows user-defined list-like datatypes to be used
+// for formatting clause implementations.
+type FormatList interface {
+ // Arg returns the ref.Val at the given index, or an error if one occurred.
+ Arg(int64) (ref.Val, error)
+ // ArgSize returns the length of the argument list.
+ ArgSize() int64
+}
+
+type clauseImpl func(ref.Val, string) (string, error)
+
+// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations
+// from the provided FormatStringInterpolator and the args from the given FormatList.
+func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) {
+ i := 0
+ argIndex := 0
+ var builtStr strings.Builder
+ for i < len(formatStr) {
+ if formatStr[i] == '%' {
+ if i+1 < len(formatStr) && formatStr[i+1] == '%' {
+ err := builtStr.WriteByte('%')
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += 2
+ continue
+ } else {
+ argAny, err := list.Arg(int64(argIndex))
+ if err != nil {
+ return "", err
+ }
+ if i+1 >= len(formatStr) {
+ return "", errors.New("unexpected end of string")
+ }
+ if int64(argIndex) >= list.ArgSize() {
+ return "", fmt.Errorf("index %d out of range", argIndex)
+ }
+ numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale)
+ if refErr != nil {
+ return "", refErr
+ }
+ _, err = builtStr.WriteString(val)
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += numRead
+ argIndex++
+ }
+ } else {
+ err := builtStr.WriteByte(formatStr[i])
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i++
+ }
+ }
+ return builtStr.String(), nil
+}
+
+// parseAndFormatClause parses the format clause at the start of the given string with val, and returns
+// how many characters were consumed and the substituted string form of val, or an error if one occurred.
+func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) {
+ i := 1
+ read, formatter, err := parseFormattingClause(formatStr[i:], callback)
+ i += read
+ if err != nil {
+ return -1, "", fmt.Errorf("could not parse formatting clause: %s", err)
+ }
+
+ valStr, err := formatter(val, locale)
+ if err != nil {
+ return -1, "", fmt.Errorf("error during formatting: %s", err)
+ }
+ return i, valStr, nil
+}
+
+func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) {
+ i := 0
+ read, precision, err := parsePrecision(formatStr[i:])
+ i += read
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while parsing precision: %w", err)
+ }
+ r := rune(formatStr[i])
+ i++
+ switch r {
+ case 's':
+ return i, callback.String, nil
+ case 'd':
+ return i, callback.Decimal, nil
+ case 'f':
+ return i, callback.Fixed(precision), nil
+ case 'e':
+ return i, callback.Scientific(precision), nil
+ case 'b':
+ return i, callback.Binary, nil
+ case 'x', 'X':
+ return i, callback.Hex(unicode.IsUpper(r)), nil
+ case 'o':
+ return i, callback.Octal, nil
+ default:
+ return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r)
+ }
+}
+
+func parsePrecision(formatStr string) (int, *int, error) {
+ i := 0
+ if formatStr[i] != '.' {
+ return i, nil, nil
+ }
+ i++
+ var buffer strings.Builder
+ for {
+ if i >= len(formatStr) {
+ return -1, nil, errors.New("could not find end of precision specifier")
+ }
+ if !isASCIIDigit(rune(formatStr[i])) {
+ break
+ }
+ buffer.WriteByte(formatStr[i])
+ i++
+ }
+ precision, err := strconv.Atoi(buffer.String())
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err)
+ }
+ return i, &precision, nil
+}
+
+func isASCIIDigit(r rune) bool {
+ return r <= unicode.MaxASCII && unicode.IsDigit(r)
+}
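ParseFormatString above walks the format string byte by byte, treats "%%" as an escape, and hands every other clause to parseFormattingClause, which first reads an optional ".<digits>" precision prefix before dispatching on the verb. A small self-contained sketch of that precision step; parsePrecision here is a simplified re-implementation mirroring the vendored helper, not the library function itself:

package main

import (
	"fmt"
	"strconv"
	"unicode"
)

// parsePrecision reads an optional ".<digits>" precision prefix from the text
// immediately following a '%', returning how many bytes were consumed.
func parsePrecision(s string) (consumed int, precision *int, err error) {
	if s == "" || s[0] != '.' {
		return 0, nil, nil
	}
	i := 1
	for i < len(s) && s[i] <= unicode.MaxASCII && unicode.IsDigit(rune(s[i])) {
		i++
	}
	if i == len(s) {
		return -1, nil, fmt.Errorf("could not find end of precision specifier")
	}
	p, err := strconv.Atoi(s[1:i])
	if err != nil {
		return -1, nil, err
	}
	return i, &p, nil
}

func main() {
	n, p, err := parsePrecision(".3f") // as seen after the '%' in "%.3f"
	fmt.Println(n, *p, err)            // 2 3 <nil>
}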
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/functions/functions.go
index dd1e9ddd5ff3..9816017522f1 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/functions/functions.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/functions/functions.go
@@ -58,5 +58,5 @@ type UnaryOp func(value ref.Val) ref.Val
type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
// FunctionOp is a function with accepts zero or more arguments and produces
-// an value (as interface{}) or error as a result.
+// a value or error as a result.
type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go
index 4fdd12028bde..840779175e31 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -15,8 +15,6 @@
package interpreter
import (
- "math"
-
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
@@ -64,10 +62,17 @@ type InterpretableAttribute interface {
// Qualify replicates the Attribute.Qualify method to permit extension and interception
// of object qualification.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+
+ IsOptional() bool
// Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
+ Resolve(Activation) (any, error)
}
// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
@@ -103,10 +108,10 @@ type InterpretableConstructor interface {
// Core Interpretable implementations used during the program planning phase.
type evalTestOnly struct {
- id int64
- op Interpretable
- field types.String
- fieldType *ref.FieldType
+ id int64
+ attr InterpretableAttribute
+ qual Qualifier
+ field types.String
}
// ID implements the Interpretable interface method.
@@ -116,44 +121,28 @@ func (test *evalTestOnly) ID() int64 {
// Eval implements the Interpretable interface method.
func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
- // Handle field selection on a proto in the most efficient way possible.
- if test.fieldType != nil {
- opAttr, ok := test.op.(InterpretableAttribute)
- if ok {
- opVal, err := opAttr.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- refVal, ok := opVal.(ref.Val)
- if ok {
- opVal = refVal.Value()
- }
- if test.fieldType.IsSet(opVal) {
- return types.True
- }
+ val, err := test.attr.Resolve(ctx)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ optVal, isOpt := val.(*types.Optional)
+ if isOpt {
+ if !optVal.HasValue() {
return types.False
}
+ val = optVal.GetValue()
}
-
- obj := test.op.Eval(ctx)
- tester, ok := obj.(traits.FieldTester)
- if ok {
- return tester.IsSet(test.field)
+ out, found, err := test.qual.QualifyIfPresent(ctx, val, true)
+ if err != nil {
+ return types.NewErr(err.Error())
}
- container, ok := obj.(traits.Container)
- if ok {
- return container.Contains(test.field)
+ if unk, isUnk := out.(types.Unknown); isUnk {
+ return unk
}
- return types.ValOrErr(obj, "invalid type for field selection.")
-}
-
-// Cost provides the heuristic cost of a `has(field)` macro. The cost has at least 1 for determining
-// if the field exists, apart from the cost of accessing the field.
-func (test *evalTestOnly) Cost() (min, max int64) {
- min, max = estimateCost(test.op)
- min++
- max++
- return
+ if found {
+ return types.True
+ }
+ return types.False
}
// NewConstValue creates a new constant valued Interpretable.
@@ -179,11 +168,6 @@ func (cons *evalConst) Eval(ctx Activation) ref.Val {
return cons.val
}
-// Cost returns zero for a constant valued Interpretable.
-func (cons *evalConst) Cost() (min, max int64) {
- return 0, 0
-}
-
// Value implements the InterpretableConst interface method.
func (cons *evalConst) Value() ref.Val {
return cons.val
@@ -233,12 +217,6 @@ func (or *evalOr) Eval(ctx Activation) ref.Val {
return types.ValOrErr(rVal, "no such overload")
}
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (or *evalOr) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(or.lhs, or.rhs)
-}
-
type evalAnd struct {
id int64
lhs Interpretable
@@ -283,18 +261,6 @@ func (and *evalAnd) Eval(ctx Activation) ref.Val {
return types.ValOrErr(rVal, "no such overload")
}
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (and *evalAnd) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calShortCircuitBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- _, rMax := estimateCost(rhs)
- return lMin, lMax + rMax + 1
-}
-
type evalEq struct {
id int64
lhs Interpretable
@@ -319,11 +285,6 @@ func (eq *evalEq) Eval(ctx Activation) ref.Val {
return types.Equal(lVal, rVal)
}
-// Cost implements the Coster interface method.
-func (eq *evalEq) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(eq.lhs, eq.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (*evalEq) Function() string {
return operators.Equals
@@ -363,11 +324,6 @@ func (ne *evalNe) Eval(ctx Activation) ref.Val {
return types.Bool(types.Equal(lVal, rVal) != types.True)
}
-// Cost implements the Coster interface method.
-func (ne *evalNe) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(ne.lhs, ne.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (*evalNe) Function() string {
return operators.NotEquals
@@ -400,11 +356,6 @@ func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
return zero.impl()
}
-// Cost returns 1 representing the heuristic cost of the function.
-func (zero *evalZeroArity) Cost() (min, max int64) {
- return 1, 1
-}
-
// Function implements the InterpretableCall interface method.
func (zero *evalZeroArity) Function() string {
return zero.function
@@ -456,14 +407,6 @@ func (un *evalUnary) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", un.function)
}
-// Cost implements the Coster interface method.
-func (un *evalUnary) Cost() (min, max int64) {
- min, max = estimateCost(un.arg)
- min++ // add cost for function
- max++
- return
-}
-
// Function implements the InterpretableCall interface method.
func (un *evalUnary) Function() string {
return un.function
@@ -522,11 +465,6 @@ func (bin *evalBinary) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", bin.function)
}
-// Cost implements the Coster interface method.
-func (bin *evalBinary) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(bin.lhs, bin.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (bin *evalBinary) Function() string {
return bin.function
@@ -593,14 +531,6 @@ func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", fn.function)
}
-// Cost implements the Coster interface method.
-func (fn *evalVarArgs) Cost() (min, max int64) {
- min, max = sumOfCost(fn.args)
- min++ // add cost for function
- max++
- return
-}
-
// Function implements the InterpretableCall interface method.
func (fn *evalVarArgs) Function() string {
return fn.function
@@ -617,9 +547,11 @@ func (fn *evalVarArgs) Args() []Interpretable {
}
type evalList struct {
- id int64
- elems []Interpretable
- adapter ref.TypeAdapter
+ id int64
+ elems []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter ref.TypeAdapter
}
// ID implements the Interpretable interface method.
@@ -629,14 +561,24 @@ func (l *evalList) ID() int64 {
// Eval implements the Interpretable interface method.
func (l *evalList) Eval(ctx Activation) ref.Val {
- elemVals := make([]ref.Val, len(l.elems))
+ elemVals := make([]ref.Val, 0, len(l.elems))
// If any argument is unknown or error early terminate.
for i, elem := range l.elems {
elemVal := elem.Eval(ctx)
if types.IsUnknownOrError(elemVal) {
return elemVal
}
- elemVals[i] = elemVal
+ if l.hasOptionals && l.optionals[i] {
+ optVal, ok := elemVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalElementInit(elemVal)
+ }
+ if !optVal.HasValue() {
+ continue
+ }
+ elemVal = optVal.GetValue()
+ }
+ elemVals = append(elemVals, elemVal)
}
return l.adapter.NativeToValue(elemVals)
}
@@ -649,16 +591,13 @@ func (l *evalList) Type() ref.Type {
return types.ListType
}
-// Cost implements the Coster interface method.
-func (l *evalList) Cost() (min, max int64) {
- return sumOfCost(l.elems)
-}
-
type evalMap struct {
- id int64
- keys []Interpretable
- vals []Interpretable
- adapter ref.TypeAdapter
+ id int64
+ keys []Interpretable
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter ref.TypeAdapter
}
// ID implements the Interpretable interface method.
@@ -679,6 +618,17 @@ func (m *evalMap) Eval(ctx Activation) ref.Val {
if types.IsUnknownOrError(valVal) {
return valVal
}
+ if m.hasOptionals && m.optionals[i] {
+ optVal, ok := valVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(keyVal, valVal)
+ }
+ if !optVal.HasValue() {
+ delete(entries, keyVal)
+ continue
+ }
+ valVal = optVal.GetValue()
+ }
entries[keyVal] = valVal
}
return m.adapter.NativeToValue(entries)
@@ -704,19 +654,14 @@ func (m *evalMap) Type() ref.Type {
return types.MapType
}
-// Cost implements the Coster interface method.
-func (m *evalMap) Cost() (min, max int64) {
- kMin, kMax := sumOfCost(m.keys)
- vMin, vMax := sumOfCost(m.vals)
- return kMin + vMin, kMax + vMax
-}
-
type evalObj struct {
- id int64
- typeName string
- fields []string
- vals []Interpretable
- provider ref.TypeProvider
+ id int64
+ typeName string
+ fields []string
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ provider ref.TypeProvider
}
// ID implements the Interpretable interface method.
@@ -733,6 +678,17 @@ func (o *evalObj) Eval(ctx Activation) ref.Val {
if types.IsUnknownOrError(val) {
return val
}
+ if o.hasOptionals && o.optionals[i] {
+ optVal, ok := val.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(field, val)
+ }
+ if !optVal.HasValue() {
+ delete(fieldVals, field)
+ continue
+ }
+ val = optVal.GetValue()
+ }
fieldVals[field] = val
}
return o.provider.NewValue(o.typeName, fieldVals)
@@ -746,21 +702,6 @@ func (o *evalObj) Type() ref.Type {
return types.NewObjectTypeValue(o.typeName)
}
-// Cost implements the Coster interface method.
-func (o *evalObj) Cost() (min, max int64) {
- return sumOfCost(o.vals)
-}
-
-func sumOfCost(interps []Interpretable) (min, max int64) {
- min, max = 0, 0
- for _, in := range interps {
- minT, maxT := estimateCost(in)
- min += minT
- max += maxT
- }
- return
-}
-
type evalFold struct {
id int64
accuVar string
@@ -842,38 +783,6 @@ func (fold *evalFold) Eval(ctx Activation) ref.Val {
return res
}
-// Cost implements the Coster interface method.
-func (fold *evalFold) Cost() (min, max int64) {
- // Compute the cost for evaluating iterRange.
- iMin, iMax := estimateCost(fold.iterRange)
-
- // Compute the size of iterRange. If the size depends on the input, return the maximum possible
- // cost range.
- foldRange := fold.iterRange.Eval(EmptyActivation())
- if !foldRange.Type().HasTrait(traits.IterableType) {
- return 0, math.MaxInt64
- }
- var rangeCnt int64
- it := foldRange.(traits.Iterable).Iterator()
- for it.HasNext() == types.True {
- it.Next()
- rangeCnt++
- }
- aMin, aMax := estimateCost(fold.accu)
- cMin, cMax := estimateCost(fold.cond)
- sMin, sMax := estimateCost(fold.step)
- rMin, rMax := estimateCost(fold.result)
- if fold.exhaustive {
- cMin = cMin * rangeCnt
- sMin = sMin * rangeCnt
- }
-
- // The cond and step costs are multiplied by size(iterRange). The minimum possible cost incurs
- // when the evaluation result can be determined by the first iteration.
- return iMin + aMin + cMin + sMin + rMin,
- iMax + aMax + cMax*rangeCnt + sMax*rangeCnt + rMax
-}
-
// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
// plan via decorators.
@@ -899,11 +808,6 @@ func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
return types.False
}
-// Cost implements the Coster interface method.
-func (e *evalSetMembership) Cost() (min, max int64) {
- return estimateCost(e.arg)
-}
-
// evalWatch is an Interpretable implementation that wraps the execution of a given
// expression so that it may observe the computed value and send it to an observer.
type evalWatch struct {
@@ -918,15 +822,10 @@ func (e *evalWatch) Eval(ctx Activation) ref.Val {
return val
}
-// Cost implements the Coster interface method.
-func (e *evalWatch) Cost() (min, max int64) {
- return estimateCost(e.Interpretable)
-}
-
-// evalWatchAttr describes a watcher of an instAttr Interpretable.
+// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable.
//
// Since the watcher may be selected against at a later stage in program planning, the watcher
-// must implement the instAttr interface by proxy.
+// must implement the InterpretableAttribute interface by proxy.
type evalWatchAttr struct {
InterpretableAttribute
observer EvalObserver
@@ -953,11 +852,6 @@ func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
return e, err
}
-// Cost implements the Coster interface method.
-func (e *evalWatchAttr) Cost() (min, max int64) {
- return estimateCost(e.InterpretableAttribute)
-}
-
// Eval implements the Interpretable interface method.
func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
val := e.InterpretableAttribute.Eval(vars)
@@ -973,13 +867,8 @@ type evalWatchConstQual struct {
adapter ref.TypeAdapter
}
-// Cost implements the Coster interface method.
-func (e *evalWatchConstQual) Cost() (min, max int64) {
- return estimateCost(e.ConstantQualifier)
-}
-
// Qualify observes the qualification of a object via a constant boolean, int, string, or uint.
-func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) {
out, err := e.ConstantQualifier.Qualify(vars, obj)
var val ref.Val
if err != nil {
@@ -992,7 +881,7 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interfac
}
// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
-func (e *evalWatchConstQual) QualifierValueEquals(value interface{}) bool {
+func (e *evalWatchConstQual) QualifierValueEquals(value any) bool {
qve, ok := e.ConstantQualifier.(qualifierValueEquator)
return ok && qve.QualifierValueEquals(value)
}
@@ -1004,13 +893,8 @@ type evalWatchQual struct {
adapter ref.TypeAdapter
}
-// Cost implements the Coster interface method.
-func (e *evalWatchQual) Cost() (min, max int64) {
- return estimateCost(e.Qualifier)
-}
-
// Qualify observes the qualification of a object via a value computed at runtime.
-func (e *evalWatchQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) {
out, err := e.Qualifier.Qualify(vars, obj)
var val ref.Val
if err != nil {
@@ -1035,11 +919,6 @@ func (e *evalWatchConst) Eval(vars Activation) ref.Val {
return val
}
-// Cost implements the Coster interface method.
-func (e *evalWatchConst) Cost() (min, max int64) {
- return estimateCost(e.InterpretableConst)
-}
-
// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
type evalExhaustiveOr struct {
id int64
@@ -1078,12 +957,7 @@ func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
if types.IsError(lVal) {
return lVal
}
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (or *evalExhaustiveOr) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(or.lhs, or.rhs)
+ return types.MaybeNoSuchOverloadErr(rVal)
}
// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
@@ -1124,18 +998,7 @@ func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
if types.IsError(lVal) {
return lVal
}
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (and *evalExhaustiveAnd) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calExhaustiveBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- rMin, rMax := estimateCost(rhs)
- return lMin + rMin + 1, lMax + rMax + 1
+ return types.MaybeNoSuchOverloadErr(rVal)
}
// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
@@ -1154,33 +1017,29 @@ func (cond *evalExhaustiveConditional) ID() int64 {
// Eval implements the Interpretable interface method.
func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
cVal := cond.attr.expr.Eval(ctx)
- tVal, err := cond.attr.truthy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- fVal, err := cond.attr.falsy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
+ tVal, tErr := cond.attr.truthy.Resolve(ctx)
+ fVal, fErr := cond.attr.falsy.Resolve(ctx)
cBool, ok := cVal.(types.Bool)
if !ok {
return types.ValOrErr(cVal, "no such overload")
}
if cBool {
+ if tErr != nil {
+ return types.NewErr(tErr.Error())
+ }
return cond.adapter.NativeToValue(tVal)
}
+ if fErr != nil {
+ return types.NewErr(fErr.Error())
+ }
return cond.adapter.NativeToValue(fVal)
}
-// Cost implements the Coster interface method.
-func (cond *evalExhaustiveConditional) Cost() (min, max int64) {
- return cond.attr.Cost()
-}
-
// evalAttr evaluates an Attribute value.
type evalAttr struct {
- adapter ref.TypeAdapter
- attr Attribute
+ adapter ref.TypeAdapter
+ attr Attribute
+ optional bool
}
// ID of the attribute instruction.
@@ -1188,28 +1047,23 @@ func (a *evalAttr) ID() int64 {
return a.attr.ID()
}
-// AddQualifier implements the instAttr interface method.
+// AddQualifier implements the InterpretableAttribute interface method.
func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
attr, err := a.attr.AddQualifier(qual)
a.attr = attr
return attr, err
}
-// Attr implements the instAttr interface method.
+// Attr implements the InterpretableAttribute interface method.
func (a *evalAttr) Attr() Attribute {
return a.attr
}
-// Adapter implements the instAttr interface method.
+// Adapter implements the InterpretableAttribute interface method.
func (a *evalAttr) Adapter() ref.TypeAdapter {
return a.adapter
}
-// Cost implements the Coster interface method.
-func (a *evalAttr) Cost() (min, max int64) {
- return estimateCost(a.attr)
-}
-
// Eval implements the Interpretable interface method.
func (a *evalAttr) Eval(ctx Activation) ref.Val {
v, err := a.attr.Resolve(ctx)
@@ -1220,11 +1074,55 @@ func (a *evalAttr) Eval(ctx Activation) ref.Val {
}
// Qualify proxies to the Attribute's Qualify method.
-func (a *evalAttr) Qualify(ctx Activation, obj interface{}) (interface{}, error) {
+func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) {
return a.attr.Qualify(ctx, obj)
}
+// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method.
+func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return a.attr.QualifyIfPresent(ctx, obj, presenceOnly)
+}
+
+func (a *evalAttr) IsOptional() bool {
+ return a.optional
+}
+
// Resolve proxies to the Attribute's Resolve method.
-func (a *evalAttr) Resolve(ctx Activation) (interface{}, error) {
+func (a *evalAttr) Resolve(ctx Activation) (any, error) {
return a.attr.Resolve(ctx)
}
+
+type evalWatchConstructor struct {
+ constructor InterpretableConstructor
+ observer EvalObserver
+}
+
+// InitVals implements the InterpretableConstructor InitVals function.
+func (c *evalWatchConstructor) InitVals() []Interpretable {
+ return c.constructor.InitVals()
+}
+
+// Type implements the InterpretableConstructor Type function.
+func (c *evalWatchConstructor) Type() ref.Type {
+ return c.constructor.Type()
+}
+
+// ID implements the Interpretable ID function.
+func (c *evalWatchConstructor) ID() int64 {
+ return c.constructor.ID()
+}
+
+// Eval implements the Interpretable Eval function.
+func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val {
+ val := c.constructor.Eval(ctx)
+ c.observer(c.ID(), c.constructor, val)
+ return val
+}
+
+func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value)
+}
+
+func invalidOptionalElementInit(value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
+}
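
The exhaustive conditional above resolves both the truthy and falsy attributes eagerly, but only surfaces an error from the branch the condition actually selects. A minimal standalone sketch of that error-deferral pattern (plain Go with illustrative names, not the vendored implementation):

package main

import "fmt"

// resolveBoth mirrors the idea in evalExhaustiveConditional.Eval: both branches
// are computed up front, yet an error is reported only for the branch chosen
// by the condition.
func resolveBoth(cond bool, truthy, falsy func() (int, error)) (int, error) {
	tVal, tErr := truthy()
	fVal, fErr := falsy()
	if cond {
		if tErr != nil {
			return 0, tErr
		}
		return tVal, nil
	}
	if fErr != nil {
		return 0, fErr
	}
	return fVal, nil
}

func main() {
	v, err := resolveBoth(true,
		func() (int, error) { return 1, nil },
		func() (int, error) { return 0, fmt.Errorf("falsy branch failed") })
	fmt.Println(v, err) // 1 <nil>: the falsy error is computed but never surfaced
}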
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpreter.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpreter.go
index b3fd14f8b397..707a6105a1ca 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpreter.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -29,19 +29,17 @@ import (
type Interpreter interface {
// NewInterpretable creates an Interpretable from a checked expression and an
// optional list of InterpretableDecorator values.
- NewInterpretable(checked *exprpb.CheckedExpr,
- decorators ...InterpretableDecorator) (Interpretable, error)
+ NewInterpretable(checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) (Interpretable, error)
// NewUncheckedInterpretable returns an Interpretable from a parsed expression
// and an optional list of InterpretableDecorator values.
- NewUncheckedInterpretable(expr *exprpb.Expr,
- decorators ...InterpretableDecorator) (Interpretable, error)
+ NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error)
}
// EvalObserver is a functional interface that accepts an expression id and an observed value.
// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that
// was evaluated and value is the result of the evaluation.
-type EvalObserver func(id int64, programStep interface{}, value ref.Val)
+type EvalObserver func(id int64, programStep any, value ref.Val)
// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
// or Qualifier during program evaluation.
@@ -49,7 +47,7 @@ func Observe(observers ...EvalObserver) InterpretableDecorator {
if len(observers) == 1 {
return decObserveEval(observers[0])
}
- observeFn := func(id int64, programStep interface{}, val ref.Val) {
+ observeFn := func(id int64, programStep any, val ref.Val) {
for _, observer := range observers {
observer(id, programStep, val)
}
@@ -96,7 +94,7 @@ func TrackState(state EvalState) InterpretableDecorator {
// This decorator is not thread-safe, and the EvalState must be reset between Eval()
// calls.
func EvalStateObserver(state EvalState) EvalObserver {
- return func(id int64, programStep interface{}, val ref.Val) {
+ return func(id int64, programStep any, val ref.Val) {
state.SetValue(id, val)
}
}
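
The Observe decorator above fans each evaluation event out to every registered EvalObserver, special-casing the single-observer path. A small, self-contained sketch of that composition (hypothetical observer type, not the cel-go API):

package main

import "fmt"

// observer receives the id of an evaluated step and its value.
type observer func(id int64, value any)

// compose returns one observer that invokes each provided observer in order,
// mirroring how Observe builds a single EvalObserver from many.
func compose(observers ...observer) observer {
	if len(observers) == 1 {
		return observers[0]
	}
	return func(id int64, value any) {
		for _, o := range observers {
			o(id, value)
		}
	}
}

func main() {
	var logged []string
	o := compose(
		func(id int64, v any) { logged = append(logged, fmt.Sprintf("log %d=%v", id, v)) },
		func(id int64, v any) { logged = append(logged, fmt.Sprintf("state %d=%v", id, v)) },
	)
	o(7, "ok")
	fmt.Println(logged) // [log 7=ok state 7=ok]
}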
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go
index 882e0419a5f9..9cf8e4e5c02f 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -189,16 +189,7 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
if err != nil {
return nil, err
}
-
- // Determine the field type if this is a proto message type.
- var fieldType *ref.FieldType
opType := p.typeMap[sel.GetOperand().GetId()]
- if opType.GetMessageType() != "" {
- ft, found := p.provider.FindFieldType(opType.GetMessageType(), sel.GetField())
- if found && ft.IsSet != nil && ft.GetFrom != nil {
- fieldType = ft
- }
- }
// If the Select was marked TestOnly, this is a presence test.
//
@@ -211,37 +202,35 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
// If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
// it is not clear whether has should error or follow the convention defined for structured
// values.
- if sel.TestOnly {
- // Return the test only eval expression.
- return &evalTestOnly{
- id: expr.GetId(),
- field: types.String(sel.GetField()),
- fieldType: fieldType,
- op: op,
- }, nil
- }
- // Build a qualifier.
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), sel.GetField())
- if err != nil {
- return nil, err
- }
- // Lastly, create a field selection Interpretable.
+
+ // Establish the attribute reference.
attr, isAttr := op.(InterpretableAttribute)
- if isAttr {
- _, err = attr.AddQualifier(qual)
- return attr, err
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
}
- relAttr, err := p.relativeAttr(op.ID(), op)
+ // Build a qualifier for the attribute.
+ qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false)
if err != nil {
return nil, err
}
- _, err = relAttr.AddQualifier(qual)
- if err != nil {
- return nil, err
+
+ // Return the test only eval expression.
+ if sel.GetTestOnly() {
+ return &evalTestOnly{
+ id: expr.GetId(),
+ field: types.String(sel.GetField()),
+ attr: attr,
+ qual: qual,
+ }, nil
}
- return relAttr, nil
+
+ // Otherwise, append the qualifier on the attribute.
+ _, err = attr.AddQualifier(qual)
+ return attr, err
}
// planCall creates a callable Interpretable while specializing for common functions and invocation
@@ -286,7 +275,9 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
case operators.NotEquals:
return p.planCallNotEqual(expr, args)
case operators.Index:
- return p.planCallIndex(expr, args)
+ return p.planCallIndex(expr, args, false)
+ case operators.OptSelect, operators.OptIndex:
+ return p.planCallIndex(expr, args, true)
}
// Otherwise, generate Interpretable calls specialized by argument count.
@@ -423,8 +414,7 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr,
}
// planCallEqual generates an equals (==) Interpretable.
-func (p *planner) planCallEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalEq{
id: expr.GetId(),
lhs: args[0],
@@ -433,8 +423,7 @@ func (p *planner) planCallEqual(expr *exprpb.Expr,
}
// planCallNotEqual generates a not equals (!=) Interpretable.
-func (p *planner) planCallNotEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalNe{
id: expr.GetId(),
lhs: args[0],
@@ -443,8 +432,7 @@ func (p *planner) planCallNotEqual(expr *exprpb.Expr,
}
// planCallLogicalAnd generates a logical and (&&) Interpretable.
-func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalAnd{
id: expr.GetId(),
lhs: args[0],
@@ -453,8 +441,7 @@ func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
}
// planCallLogicalOr generates a logical or (||) Interpretable.
-func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalOr{
id: expr.GetId(),
lhs: args[0],
@@ -463,10 +450,8 @@ func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
}
// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
-func (p *planner) planCallConditional(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
cond := args[0]
-
t := args[1]
var tAttr Attribute
truthyAttr, isTruthyAttr := t.(InterpretableAttribute)
@@ -493,48 +478,54 @@ func (p *planner) planCallConditional(expr *exprpb.Expr,
// planCallIndex either extends an attribute with the argument to the index operation, or creates
// a relative attribute based on the return of a function call or operation.
-func (p *planner) planCallIndex(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) {
op := args[0]
ind := args[1]
- opAttr, err := p.relativeAttr(op.ID(), op)
- if err != nil {
- return nil, err
- }
opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()]
- indConst, isIndConst := ind.(InterpretableConst)
- if isIndConst {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indConst.Value())
+
+ // Establish the attribute reference.
+ var err error
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
if err != nil {
return nil, err
}
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
}
- indAttr, isIndAttr := ind.(InterpretableAttribute)
- if isIndAttr {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indAttr)
- if err != nil {
- return nil, err
- }
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
+
+ // Construct the qualifier type.
+ var qual Qualifier
+ switch ind := ind.(type) {
+ case InterpretableConst:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional)
+ case InterpretableAttribute:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional)
+ default:
+ qual, err = p.relativeAttr(expr.GetId(), ind, optional)
}
- indQual, err := p.relativeAttr(expr.GetId(), ind)
if err != nil {
return nil, err
}
- _, err = opAttr.AddQualifier(indQual)
- return opAttr, err
+
+ // Add the qualifier to the attribute
+ _, err = attr.AddQualifier(qual)
+ return attr, err
}
// planCreateList generates a list construction Interpretable.
func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
list := expr.GetListExpr()
- elems := make([]Interpretable, len(list.GetElements()))
- for i, elem := range list.GetElements() {
+ optionalIndices := list.GetOptionalIndices()
+ elements := list.GetElements()
+ optionals := make([]bool, len(elements))
+ for _, index := range optionalIndices {
+ if index < 0 || index >= int32(len(elements)) {
+ return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+ }
+ optionals[index] = true
+ }
+ elems := make([]Interpretable, len(elements))
+ for i, elem := range elements {
elemVal, err := p.Plan(elem)
if err != nil {
return nil, err
@@ -542,9 +533,11 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
elems[i] = elemVal
}
return &evalList{
- id: expr.GetId(),
- elems: elems,
- adapter: p.adapter,
+ id: expr.GetId(),
+ elems: elems,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
}, nil
}
@@ -555,6 +548,7 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
return p.planCreateObj(expr)
}
entries := str.GetEntries()
+ optionals := make([]bool, len(entries))
keys := make([]Interpretable, len(entries))
vals := make([]Interpretable, len(entries))
for i, entry := range entries {
@@ -569,23 +563,27 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
return nil, err
}
vals[i] = valVal
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalMap{
- id: expr.GetId(),
- keys: keys,
- vals: vals,
- adapter: p.adapter,
+ id: expr.GetId(),
+ keys: keys,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
}, nil
}
// planCreateObj generates an object construction Interpretable.
func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
obj := expr.GetStructExpr()
- typeName, defined := p.resolveTypeName(obj.MessageName)
+ typeName, defined := p.resolveTypeName(obj.GetMessageName())
if !defined {
- return nil, fmt.Errorf("unknown type: %s", typeName)
+ return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName())
}
entries := obj.GetEntries()
+ optionals := make([]bool, len(entries))
fields := make([]string, len(entries))
vals := make([]Interpretable, len(entries))
for i, entry := range entries {
@@ -595,13 +593,16 @@ func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
return nil, err
}
vals[i] = val
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalObj{
- id: expr.GetId(),
- typeName: typeName,
- fields: fields,
- vals: vals,
- provider: p.provider,
+ id: expr.GetId(),
+ typeName: typeName,
+ fields: fields,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ provider: p.provider,
}, nil
}
@@ -753,14 +754,18 @@ func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, stri
return target, fnName, ""
}
-func (p *planner) relativeAttr(id int64, eval Interpretable) (InterpretableAttribute, error) {
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluation value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
eAttr, ok := eval.(InterpretableAttribute)
if !ok {
eAttr = &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.RelativeAttribute(id, eval),
+ adapter: p.adapter,
+ attr: p.attrFactory.RelativeAttribute(id, eval),
+ optional: opt,
}
}
+ // This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
decAttr, err := p.decorate(eAttr, nil)
if err != nil {
return nil, err
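
The planner now converts a list literal's optional indices into a per-element boolean mask, rejecting any index outside the element range. A standalone sketch of that mask construction (plain Go, names are illustrative):

package main

import "fmt"

// optionalMask mirrors the bounds-checked conversion in planCreateList:
// each int32 index flags one element of the list literal as optional.
func optionalMask(numElems int, optionalIndices []int32) ([]bool, error) {
	mask := make([]bool, numElems)
	for _, idx := range optionalIndices {
		if idx < 0 || idx >= int32(numElems) {
			return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", idx, numElems)
		}
		mask[idx] = true
	}
	return mask, nil
}

func main() {
	mask, err := optionalMask(3, []int32{0, 2})
	fmt.Println(mask, err) // [true false true] <nil>
}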
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go
index eab46e0c06eb..b8b015a7a65b 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -26,6 +26,7 @@ import (
type astPruner struct {
expr *exprpb.Expr
+ macroCalls map[int64]*exprpb.Expr
state EvalState
nextExprID int64
}
@@ -65,13 +66,17 @@ type astPruner struct {
// compiled and constant folded expressions, but is not willing to constant
// fold(and thus cache results of) some external calls, then they can prepare
// the overloads accordingly.
-func PruneAst(expr *exprpb.Expr, state EvalState) *exprpb.Expr {
+func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr {
pruner := &astPruner{
expr: expr,
+ macroCalls: macroCalls,
state: state,
nextExprID: 1}
- newExpr, _ := pruner.prune(expr)
- return newExpr
+ newExpr, _ := pruner.maybePrune(expr)
+ return &exprpb.ParsedExpr{
+ Expr: newExpr,
+ SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls},
+ }
}
func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
@@ -223,6 +228,14 @@ func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
return nil, false
}
+func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ out, pruned := p.prune(node)
+ if pruned {
+ delete(p.macroCalls, node.GetId())
+ }
+ return out, pruned
+}
+
func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
if node == nil {
return node, false
@@ -240,7 +253,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
switch node.GetExprKind().(type) {
case *exprpb.Expr_SelectExpr:
- if operand, pruned := p.prune(node.GetSelectExpr().GetOperand()); pruned {
+ if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned {
return &exprpb.Expr{
Id: node.GetId(),
ExprKind: &exprpb.Expr_SelectExpr{
@@ -254,7 +267,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
}
case *exprpb.Expr_CallExpr:
if newExpr, pruned := p.maybePruneFunction(node); pruned {
- newExpr, _ = p.prune(newExpr)
+ newExpr, _ = p.maybePrune(newExpr)
return newExpr, true
}
var prunedCall bool
@@ -268,12 +281,12 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
}
for i, arg := range args {
newArgs[i] = arg
- if newArg, prunedArg := p.prune(arg); prunedArg {
+ if newArg, prunedArg := p.maybePrune(arg); prunedArg {
prunedCall = true
newArgs[i] = newArg
}
}
- if newTarget, prunedTarget := p.prune(call.GetTarget()); prunedTarget {
+ if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget {
prunedCall = true
newCall.Target = newTarget
}
@@ -291,7 +304,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
var prunedList bool
for i, elem := range elems {
newElems[i] = elem
- if newElem, prunedElem := p.prune(elem); prunedElem {
+ if newElem, prunedElem := p.maybePrune(elem); prunedElem {
newElems[i] = newElem
prunedList = true
}
@@ -313,8 +326,8 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
for i, entry := range entries {
newEntries[i] = entry
- newKey, prunedKey := p.prune(entry.GetMapKey())
- newValue, prunedValue := p.prune(entry.GetValue())
+ newKey, prunedKey := p.maybePrune(entry.GetMapKey())
+ newValue, prunedValue := p.maybePrune(entry.GetValue())
if !prunedKey && !prunedValue {
continue
}
@@ -349,7 +362,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
// Only the range of the comprehension is pruned since the state tracking only records
// the last iteration of the comprehension and not each step in the evaluation which
// means that the any residuals computed in between might be inaccurate.
- if newRange, pruned := p.prune(compre.GetIterRange()); pruned {
+ if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned {
return &exprpb.Expr{
Id: node.GetId(),
ExprKind: &exprpb.Expr_ComprehensionExpr{
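
maybePrune wraps prune so that whenever a subtree is replaced, any macro call recorded against that node's id is dropped from the macro-call map, keeping the pruned AST's source info consistent. A minimal sketch of that bookkeeping pattern (generic node type for illustration, not the exprpb API):

package main

import "fmt"

type node struct {
	id int64
}

// pruneWithCleanup mirrors maybePrune: when the underlying prune step replaces
// a node, the macroCalls entry keyed by that node's id is deleted as well.
func pruneWithCleanup(n *node, macroCalls map[int64]*node, prune func(*node) (*node, bool)) (*node, bool) {
	out, pruned := prune(n)
	if pruned {
		delete(macroCalls, n.id)
	}
	return out, pruned
}

func main() {
	macros := map[int64]*node{42: {id: 42}}
	folded := &node{id: 99}
	_, pruned := pruneWithCleanup(&node{id: 42}, macros, func(*node) (*node, bool) { return folded, true })
	fmt.Println(pruned, len(macros)) // true 0
}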
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go
index 06b6b27ef1f1..e7daf011fc71 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -36,7 +36,7 @@ type ActualCostEstimator interface {
// CostObserver provides an observer that tracks runtime cost.
func CostObserver(tracker *CostTracker) EvalObserver {
- observer := func(id int64, programStep interface{}, val ref.Val) {
+ observer := func(id int64, programStep any, val ref.Val) {
switch t := programStep.(type) {
case ConstantQualifier:
// TODO: Push identifiers on to the stack before observing constant qualifiers that apply to them
@@ -69,6 +69,8 @@ func CostObserver(tracker *CostTracker) EvalObserver {
tracker.stack.drop(t.rhs.ID(), t.lhs.ID())
case *evalFold:
tracker.stack.drop(t.iterRange.ID())
+ case *evalTestOnly:
+ tracker.cost += common.SelectAndIdentCost
case Qualifier:
tracker.cost++
case InterpretableCall:
@@ -122,7 +124,7 @@ func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resul
// if user has their own implementation of ActualCostEstimator, make sure to cover the mapping between overloadId and cost calculation
switch call.OverloadID() {
// O(n) functions
- case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
+ case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor))
case overloads.InList:
// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
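
The runtime cost tracker charges the O(n) string overloads (now including the quote and format extensions) in proportion to the traversed string length, scaled by a traversal cost factor. A small sketch of that charge; the 0.1 factor is an assumption for illustration and not read from the vendored constants:

package main

import (
	"fmt"
	"math"
)

// stringTraversalCostFactor is assumed here for illustration; the real value
// lives in the cel-go common package.
const stringTraversalCostFactor = 0.1

// traversalCost mirrors the cost formula applied to O(n) string overloads such
// as startsWith, endsWith, string/bytes conversion, quote, and format.
func traversalCost(argSize int) uint64 {
	return uint64(math.Ceil(float64(argSize) * stringTraversalCostFactor))
}

func main() {
	fmt.Println(traversalCost(25)) // 3: ceil(25 * 0.1)
}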
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel
index b76e6e484412..b5c15fa570da 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -34,6 +34,7 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
+ "helper_test.go",
"parser_test.go",
"unescape_test.go",
"unparser_test.go",
@@ -47,5 +48,6 @@ go_test(
"//test:go_default_library",
"@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//testing/protocmp:go_default_library",
],
)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.g4
index 11145ec37424..b011da803ce2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.g4
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.g4
@@ -52,16 +52,18 @@ unary
member
: primary # PrimaryExpr
- | member op='.' id=IDENTIFIER (open='(' args=exprList? ')')? # SelectOrCall
- | member op='[' index=expr ']' # Index
- | member op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
+ | member op='.' (opt='?')? id=IDENTIFIER # Select
+ | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall
+ | member op='[' (opt='?')? index=expr ']' # Index
;
primary
: leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall
| '(' e=expr ')' # Nested
- | op='[' elems=exprList? ','? ']' # CreateList
+ | op='[' elems=listInit? ','? ']' # CreateList
| op='{' entries=mapInitializerList? ','? '}' # CreateStruct
+ | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)*
+ op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
| literal # ConstantLiteral
;
@@ -69,23 +71,35 @@ exprList
: e+=expr (',' e+=expr)*
;
+listInit
+ : elems+=optExpr (',' elems+=optExpr)*
+ ;
+
fieldInitializerList
- : fields+=IDENTIFIER cols+=':' values+=expr (',' fields+=IDENTIFIER cols+=':' values+=expr)*
+ : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)*
+ ;
+
+optField
+ : (opt='?')? IDENTIFIER
;
mapInitializerList
- : keys+=expr cols+=':' values+=expr (',' keys+=expr cols+=':' values+=expr)*
+ : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)*
+ ;
+
+optExpr
+ : (opt='?')? e=expr
;
literal
: sign=MINUS? tok=NUM_INT # Int
- | tok=NUM_UINT # Uint
+ | tok=NUM_UINT # Uint
| sign=MINUS? tok=NUM_FLOAT # Double
- | tok=STRING # String
- | tok=BYTES # Bytes
- | tok=CEL_TRUE # BoolTrue
- | tok=CEL_FALSE # BoolFalse
- | tok=NUL # Null
+ | tok=STRING # String
+ | tok=BYTES # Bytes
+ | tok=CEL_TRUE # BoolTrue
+ | tok=CEL_FALSE # BoolFalse
+ | tok=NUL # Null
;
// Lexer Rules
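
The grammar changes above split member selection from member calls and introduce an optional marker '?' on selections, index operations, struct fields, and map/list initializer entries. A short reference table of the expression shapes the revised grammar accepts, written as Go string literals only as a carrier for the CEL examples:

package main

import "fmt"

func main() {
	// Each entry pairs a CEL expression form enabled by the revised grammar with
	// the production it exercises.
	examples := []struct{ expr, production string }{
		{"msg.?field", "Select with opt='?'"},
		{"msg.method(arg)", "MemberCall"},
		{"list[?0]", "Index with opt='?'"},
		{"[?maybeElem, other]", "CreateList via listInit/optExpr"},
		{"{?'key': value}", "CreateStruct via mapInitializerList/optExpr"},
		{"pkg.Msg{?field: value}", "CreateMessage via fieldInitializerList/optField"},
	}
	for _, e := range examples {
		fmt.Printf("%-28s -> %s\n", e.expr, e.production)
	}
}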
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.interp
index 13e3a10d175c..75b8bb3e2032 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.interp
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/CEL.interp
@@ -87,10 +87,13 @@ unary
member
primary
exprList
+listInit
fieldInitializerList
+optField
mapInitializerList
+optExpr
literal
atn:
-[4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2, 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8, 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3, 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115, 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124, 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3, 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8, 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8, 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10, 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9, 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3, 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2, 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1, 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0, 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1, 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 31, 32, 3, 4, 2, 0, 32, 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39, 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3, 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0, 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1, 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64, 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2, 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71, 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0, 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0, 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0, 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6, 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0, 97, 98, 5, 16, 0, 0, 
98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102, 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0, 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108, 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112, 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111, 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0, 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0, 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15, 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0, 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139, 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17, 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154, 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0, 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157, 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5, 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0, 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166, 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176, 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5, 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0, 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178, 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189, 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21, 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 1, 0, 0, 0, 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190, 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192, 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32, 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0, 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201, 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207, 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1, 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0, 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207, 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114, 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193, 198, 206]
\ No newline at end of file
+[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 
0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 
0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248]
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
index 969a59861809..0247f470a7c7 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// BaseCELListener is a complete listener for a parse tree produced by CELParser.
type BaseCELListener struct{}
@@ -74,11 +74,17 @@ func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
// ExitNegate is called when production Negate is exited.
func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
-// EnterSelectOrCall is called when production SelectOrCall is entered.
-func (s *BaseCELListener) EnterSelectOrCall(ctx *SelectOrCallContext) {}
+// EnterMemberCall is called when production MemberCall is entered.
+func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {}
-// ExitSelectOrCall is called when production SelectOrCall is exited.
-func (s *BaseCELListener) ExitSelectOrCall(ctx *SelectOrCallContext) {}
+// ExitMemberCall is called when production MemberCall is exited.
+func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {}
+
+// EnterSelect is called when production Select is entered.
+func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {}
+
+// ExitSelect is called when production Select is exited.
+func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {}
// EnterPrimaryExpr is called when production PrimaryExpr is entered.
func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
@@ -92,12 +98,6 @@ func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
// ExitIndex is called when production Index is exited.
func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
-// EnterCreateMessage is called when production CreateMessage is entered.
-func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
-
-// ExitCreateMessage is called when production CreateMessage is exited.
-func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
-
// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
@@ -122,6 +122,12 @@ func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
// ExitCreateStruct is called when production CreateStruct is exited.
func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
+// EnterCreateMessage is called when production CreateMessage is entered.
+func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
+
+// ExitCreateMessage is called when production CreateMessage is exited.
+func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
+
// EnterConstantLiteral is called when production ConstantLiteral is entered.
func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
@@ -134,18 +140,36 @@ func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
// ExitExprList is called when production exprList is exited.
func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
+// EnterListInit is called when production listInit is entered.
+func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {}
+
+// ExitListInit is called when production listInit is exited.
+func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {}
+
// EnterFieldInitializerList is called when production fieldInitializerList is entered.
func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
// ExitFieldInitializerList is called when production fieldInitializerList is exited.
func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
+// EnterOptField is called when production optField is entered.
+func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {}
+
+// ExitOptField is called when production optField is exited.
+func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {}
+
// EnterMapInitializerList is called when production mapInitializerList is entered.
func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
// ExitMapInitializerList is called when production mapInitializerList is exited.
func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
+// EnterOptExpr is called when production optExpr is entered.
+func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {}
+
+// ExitOptExpr is called when production optExpr is exited.
+func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {}
+
// EnterInt is called when production Int is entered.
func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
index 8e84579ed17e..52a7f4dc57e2 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
type BaseCELVisitor struct {
*antlr.BaseParseTreeVisitor
@@ -43,19 +43,19 @@ func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitSelectOrCall(ctx *SelectOrCallContext) interface{} {
+func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
+func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
+func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
return v.VisitChildren(ctx)
}
@@ -75,6 +75,10 @@ func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{}
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
return v.VisitChildren(ctx)
}
@@ -83,14 +87,26 @@ func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
return v.VisitChildren(ctx)
}
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
index 7b4cca62e627..98ddc06d0bda 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -1,4 +1,4 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen
@@ -7,7 +7,7 @@ import (
"sync"
"unicode"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
// Suppress unused import error
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
index 1b631b6e1be6..73b7f1d39fa6 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// CELListener is a complete listener for a parse tree produced by CELParser.
type CELListener interface {
@@ -34,8 +34,11 @@ type CELListener interface {
// EnterNegate is called when entering the Negate production.
EnterNegate(c *NegateContext)
- // EnterSelectOrCall is called when entering the SelectOrCall production.
- EnterSelectOrCall(c *SelectOrCallContext)
+ // EnterMemberCall is called when entering the MemberCall production.
+ EnterMemberCall(c *MemberCallContext)
+
+ // EnterSelect is called when entering the Select production.
+ EnterSelect(c *SelectContext)
// EnterPrimaryExpr is called when entering the PrimaryExpr production.
EnterPrimaryExpr(c *PrimaryExprContext)
@@ -43,9 +46,6 @@ type CELListener interface {
// EnterIndex is called when entering the Index production.
EnterIndex(c *IndexContext)
- // EnterCreateMessage is called when entering the CreateMessage production.
- EnterCreateMessage(c *CreateMessageContext)
-
// EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
@@ -58,18 +58,30 @@ type CELListener interface {
// EnterCreateStruct is called when entering the CreateStruct production.
EnterCreateStruct(c *CreateStructContext)
+ // EnterCreateMessage is called when entering the CreateMessage production.
+ EnterCreateMessage(c *CreateMessageContext)
+
// EnterConstantLiteral is called when entering the ConstantLiteral production.
EnterConstantLiteral(c *ConstantLiteralContext)
// EnterExprList is called when entering the exprList production.
EnterExprList(c *ExprListContext)
+ // EnterListInit is called when entering the listInit production.
+ EnterListInit(c *ListInitContext)
+
// EnterFieldInitializerList is called when entering the fieldInitializerList production.
EnterFieldInitializerList(c *FieldInitializerListContext)
+ // EnterOptField is called when entering the optField production.
+ EnterOptField(c *OptFieldContext)
+
// EnterMapInitializerList is called when entering the mapInitializerList production.
EnterMapInitializerList(c *MapInitializerListContext)
+ // EnterOptExpr is called when entering the optExpr production.
+ EnterOptExpr(c *OptExprContext)
+
// EnterInt is called when entering the Int production.
EnterInt(c *IntContext)
@@ -121,8 +133,11 @@ type CELListener interface {
// ExitNegate is called when exiting the Negate production.
ExitNegate(c *NegateContext)
- // ExitSelectOrCall is called when exiting the SelectOrCall production.
- ExitSelectOrCall(c *SelectOrCallContext)
+ // ExitMemberCall is called when exiting the MemberCall production.
+ ExitMemberCall(c *MemberCallContext)
+
+ // ExitSelect is called when exiting the Select production.
+ ExitSelect(c *SelectContext)
// ExitPrimaryExpr is called when exiting the PrimaryExpr production.
ExitPrimaryExpr(c *PrimaryExprContext)
@@ -130,9 +145,6 @@ type CELListener interface {
// ExitIndex is called when exiting the Index production.
ExitIndex(c *IndexContext)
- // ExitCreateMessage is called when exiting the CreateMessage production.
- ExitCreateMessage(c *CreateMessageContext)
-
// ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
@@ -145,18 +157,30 @@ type CELListener interface {
// ExitCreateStruct is called when exiting the CreateStruct production.
ExitCreateStruct(c *CreateStructContext)
+ // ExitCreateMessage is called when exiting the CreateMessage production.
+ ExitCreateMessage(c *CreateMessageContext)
+
// ExitConstantLiteral is called when exiting the ConstantLiteral production.
ExitConstantLiteral(c *ConstantLiteralContext)
// ExitExprList is called when exiting the exprList production.
ExitExprList(c *ExprListContext)
+ // ExitListInit is called when exiting the listInit production.
+ ExitListInit(c *ListInitContext)
+
// ExitFieldInitializerList is called when exiting the fieldInitializerList production.
ExitFieldInitializerList(c *FieldInitializerListContext)
+ // ExitOptField is called when exiting the optField production.
+ ExitOptField(c *OptFieldContext)
+
// ExitMapInitializerList is called when exiting the mapInitializerList production.
ExitMapInitializerList(c *MapInitializerListContext)
+ // ExitOptExpr is called when exiting the optExpr production.
+ ExitOptExpr(c *OptExprContext)
+
// ExitInt is called when exiting the Int production.
ExitInt(c *IntContext)
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
index afb3fe0d1c2c..0cb6c8eae88e 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -1,4 +1,4 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
import (
@@ -6,7 +6,7 @@ import (
"strconv"
"sync"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
// Suppress unused import errors
@@ -46,106 +46,125 @@ func celParserInit() {
}
staticData.ruleNames = []string{
"start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
- "unary", "member", "primary", "exprList", "fieldInitializerList", "mapInitializerList",
- "literal",
+ "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
+ "optField", "mapInitializerList", "optExpr", "literal",
}
staticData.predictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
- 4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
- 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2,
- 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9,
- 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63,
- 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8,
- 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82,
- 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1,
- 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3,
- 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115,
- 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124,
- 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3,
- 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8,
- 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8,
- 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10,
- 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5,
- 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11,
- 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9,
- 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1,
- 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3,
- 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1,
- 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2,
- 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0,
- 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1,
- 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0,
- 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1,
- 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 31, 32, 3, 4, 2, 0, 32,
- 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0,
- 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39,
- 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0,
- 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0,
- 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46,
- 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0,
- 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3,
- 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0,
- 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1,
- 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64,
- 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2,
- 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71,
- 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0,
- 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11,
- 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0,
- 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83,
- 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0,
- 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89,
- 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0,
- 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6,
- 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0,
- 97, 98, 5, 16, 0, 0, 98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102,
- 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0,
- 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0,
- 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108,
- 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112,
- 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1,
- 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0,
- 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119,
- 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111,
- 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0,
- 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0,
- 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128,
- 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130,
- 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15,
- 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0,
- 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139,
- 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141,
- 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17,
- 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0,
- 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150,
- 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154,
- 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0,
- 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0,
- 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157,
- 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5,
- 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0,
- 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166,
- 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176,
- 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5,
- 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0,
- 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178,
- 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189,
- 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21,
- 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 1, 0, 0, 0,
- 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190,
- 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192,
- 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32,
- 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0,
- 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201,
- 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207,
- 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1,
- 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0,
- 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207,
- 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114,
- 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193,
- 198, 206,
+ 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
+ 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
+ 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
+ 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
+ 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
+ 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
+ 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
+ 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
+ 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
+ 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
+ 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
+ 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
+ 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
+ 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
+ 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
+ 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
+ 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
+ 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
+ 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
+ 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
+ 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
+ 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
+ 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
+ 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
+ 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
+ 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
+ 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
+ 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
+ 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
+ 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
+ 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
+ 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
+ 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
+ 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
+ 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
+ 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
+ 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
+ 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
+ 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
+ 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
+ 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
+ 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
+ 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
+ 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
+ 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
+ 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
+ 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
+ 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
+ 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
+ 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
+ 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
+ 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
+ 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
+ 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
+ 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
+ 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
+ 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
+ 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
+ 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
+ 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
+ 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
+ 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
+ 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
+ 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
+ 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
+ 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
+ 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
+ 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
+ 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
+ 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
+ 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
+ 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
+ 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
+ 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
+ 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
+ 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
+ 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
+ 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
+ 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
+ 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
+ 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
+ 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
+ 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
+ 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
+ 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
+ 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
+ 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
+ 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
+ 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
+ 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
+ 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
+ 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
+ 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
+ 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
+ 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
+ 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
+ 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
+ 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
+ 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
+ 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
+ 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
+ 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
+ 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
+ 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
+ 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
+ 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
+ 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
+ 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
+ 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
+ 240, 248,
}
deserializer := antlr.NewATNDeserializer(nil)
staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -234,9 +253,12 @@ const (
CELParserRULE_member = 7
CELParserRULE_primary = 8
CELParserRULE_exprList = 9
- CELParserRULE_fieldInitializerList = 10
- CELParserRULE_mapInitializerList = 11
- CELParserRULE_literal = 12
+ CELParserRULE_listInit = 10
+ CELParserRULE_fieldInitializerList = 11
+ CELParserRULE_optField = 12
+ CELParserRULE_mapInitializerList = 13
+ CELParserRULE_optExpr = 14
+ CELParserRULE_literal = 15
)
// IStartContext is an interface to support dynamic dispatch.
@@ -252,6 +274,10 @@ type IStartContext interface {
// SetE sets the e rule contexts.
SetE(IExprContext)
+ // Getter signatures
+ EOF() antlr.TerminalNode
+ Expr() IExprContext
+
// IsStartContext differentiates from other interfaces.
IsStartContext()
}
@@ -363,14 +389,14 @@ func (p *CELParser) Start() (localctx IStartContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(26)
+ p.SetState(32)
var _x = p.Expr()
localctx.(*StartContext).e = _x
}
{
- p.SetState(27)
+ p.SetState(33)
p.Match(CELParserEOF)
}
@@ -408,6 +434,13 @@ type IExprContext interface {
// SetE2 sets the e2 rule contexts.
SetE2(IExprContext)
+ // Getter signatures
+ AllConditionalOr() []IConditionalOrContext
+ ConditionalOr(i int) IConditionalOrContext
+ COLON() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+ Expr() IExprContext
+
// IsExprContext differentiates from other interfaces.
IsExprContext()
}
@@ -580,37 +613,37 @@ func (p *CELParser) Expr() (localctx IExprContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(29)
+ p.SetState(35)
var _x = p.ConditionalOr()
localctx.(*ExprContext).e = _x
}
- p.SetState(35)
+ p.SetState(41)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
if _la == CELParserQUESTIONMARK {
{
- p.SetState(30)
+ p.SetState(36)
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*ExprContext).op = _m
}
{
- p.SetState(31)
+ p.SetState(37)
var _x = p.ConditionalOr()
localctx.(*ExprContext).e1 = _x
}
{
- p.SetState(32)
+ p.SetState(38)
p.Match(CELParserCOLON)
}
{
- p.SetState(33)
+ p.SetState(39)
var _x = p.Expr()
@@ -659,6 +692,12 @@ type IConditionalOrContext interface {
// SetE1 sets the e1 rule context list.
SetE1([]IConditionalAndContext)
+ // Getter signatures
+ AllConditionalAnd() []IConditionalAndContext
+ ConditionalAnd(i int) IConditionalAndContext
+ AllLOGICAL_OR() []antlr.TerminalNode
+ LOGICAL_OR(i int) antlr.TerminalNode
+
// IsConditionalOrContext differentiates from other interfaces.
IsConditionalOrContext()
}
@@ -820,19 +859,19 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(37)
+ p.SetState(43)
var _x = p.ConditionalAnd()
localctx.(*ConditionalOrContext).e = _x
}
- p.SetState(42)
+ p.SetState(48)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
for _la == CELParserLOGICAL_OR {
{
- p.SetState(38)
+ p.SetState(44)
var _m = p.Match(CELParserLOGICAL_OR)
@@ -840,7 +879,7 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
}
localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
{
- p.SetState(39)
+ p.SetState(45)
var _x = p.ConditionalAnd()
@@ -848,7 +887,7 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
}
localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
- p.SetState(44)
+ p.SetState(50)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
}
@@ -893,6 +932,12 @@ type IConditionalAndContext interface {
// SetE1 sets the e1 rule context list.
SetE1([]IRelationContext)
+ // Getter signatures
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ AllLOGICAL_AND() []antlr.TerminalNode
+ LOGICAL_AND(i int) antlr.TerminalNode
+
// IsConditionalAndContext differentiates from other interfaces.
IsConditionalAndContext()
}
@@ -1054,19 +1099,19 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(45)
+ p.SetState(51)
var _x = p.relation(0)
localctx.(*ConditionalAndContext).e = _x
}
- p.SetState(50)
+ p.SetState(56)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
for _la == CELParserLOGICAL_AND {
{
- p.SetState(46)
+ p.SetState(52)
var _m = p.Match(CELParserLOGICAL_AND)
@@ -1074,7 +1119,7 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
{
- p.SetState(47)
+ p.SetState(53)
var _x = p.relation(0)
@@ -1082,7 +1127,7 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
- p.SetState(52)
+ p.SetState(58)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
}
@@ -1103,6 +1148,18 @@ type IRelationContext interface {
// SetOp sets the op token.
SetOp(antlr.Token)
+ // Getter signatures
+ Calc() ICalcContext
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ LESS() antlr.TerminalNode
+ LESS_EQUALS() antlr.TerminalNode
+ GREATER_EQUALS() antlr.TerminalNode
+ GREATER() antlr.TerminalNode
+ EQUALS() antlr.TerminalNode
+ NOT_EQUALS() antlr.TerminalNode
+ IN() antlr.TerminalNode
+
// IsRelationContext differentiates from other interfaces.
IsRelationContext()
}
@@ -1291,12 +1348,12 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(54)
+ p.SetState(60)
p.calc(0)
}
p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
- p.SetState(61)
+ p.SetState(67)
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
@@ -1308,13 +1365,13 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
_prevctx = localctx
localctx = NewRelationContext(p, _parentctx, _parentState)
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
- p.SetState(56)
+ p.SetState(62)
if !(p.Precpred(p.GetParserRuleContext(), 1)) {
panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
}
{
- p.SetState(57)
+ p.SetState(63)
var _lt = p.GetTokenStream().LT(1)
@@ -1322,7 +1379,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
_la = p.GetTokenStream().LA(1)
- if !(((_la)&-(0x1f+1)) == 0 && ((1<
-// <iterRange>.map(<iterVar>, <transform>)
-// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
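A minimal sketch, not part of this diff, of the two map() call patterns the reformatted doc comment above describes. It is written against the top-level cel-go API (cel.NewEnv, Compile, Program, Eval); the environment setup and import path are assumptions, not something this change touches.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	// Two-argument form transforms every element; the three-argument form
	// transforms only the elements for which the predicate returns true.
	for _, src := range []string{
		`[1, 2, 3].map(x, x * 2)`,        // -> [2, 4, 6]
		`[1, 2, 3].map(x, x > 1, x * 2)`, // -> [4, 6]
	} {
		ast, iss := env.Compile(src)
		if iss != nil && iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(cel.NoVars())
		if err != nil {
			panic(err)
		}
		fmt.Println(out.Value())
	}
}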
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go
index b50686a91207..8bfdae55b919 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go
@@ -23,6 +23,7 @@ type options struct {
expressionSizeCodePointLimit int
macros map[string]Macro
populateMacroCalls bool
+ enableOptionalSyntax bool
}
// Option configures the behavior of the parser.
@@ -102,3 +103,11 @@ func PopulateMacroCalls(populateMacroCalls bool) Option {
return nil
}
}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+ return func(opts *options) error {
+ opts.enableOptionalSyntax = optionalSyntax
+ return nil
+ }
+}
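A hypothetical wiring example, not part of this diff, showing how the new EnableOptionalSyntax option could be passed when a parser is constructed. parser.NewParser accepting variadic Options is assumed from the surrounding package; PopulateMacroCalls is the existing option visible earlier in the same file.
package example

import (
	"github.com/google/cel-go/parser"
)

// newOptionalSyntaxParser builds a parser with optional field/index syntax enabled.
func newOptionalSyntaxParser() (*parser.Parser, error) {
	return parser.NewParser(
		parser.EnableOptionalSyntax(true),
		parser.PopulateMacroCalls(true),
	)
}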
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go
index 072f62457455..da4481776f18 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go
@@ -18,11 +18,13 @@ package parser
import (
"fmt"
+ "regexp"
"strconv"
"strings"
"sync"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/runes"
@@ -92,6 +94,7 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors
errorRecoveryLimit: p.errorRecoveryLimit,
errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
populateMacroCalls: p.populateMacroCalls,
+ enableOptionalSyntax: p.enableOptionalSyntax,
}
buf, ok := source.(runes.Buffer)
if !ok {
@@ -178,7 +181,7 @@ func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
} else {
*depth++
}
- if *depth >= rl.maxDepth {
+ if *depth > rl.maxDepth {
panic(&recursionError{
message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
})
@@ -275,13 +278,14 @@ type parser struct {
errorRecoveryLimit int
errorRecoveryLookaheadTokenLimit int
populateMacroCalls bool
+ enableOptionalSyntax bool
}
var (
_ gen.CELVisitor = (*parser)(nil)
lexerPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
l := gen.NewCELLexer(nil)
l.RemoveErrorListeners()
return l
@@ -289,7 +293,7 @@ var (
}
parserPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
p := gen.NewCELParser(nil)
p.RemoveErrorListeners()
return p
@@ -352,57 +356,85 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
}
// Visitor implementations.
-func (p *parser) Visit(tree antlr.ParseTree) interface{} {
- p.recursionDepth++
- if p.recursionDepth > p.maxRecursionDepth {
- panic(&recursionError{message: "max recursion depth exceeded"})
- }
- defer func() {
- p.recursionDepth--
- }()
- switch tree.(type) {
+func (p *parser) Visit(tree antlr.ParseTree) any {
+ t := unnest(tree)
+ switch tree := t.(type) {
case *gen.StartContext:
- return p.VisitStart(tree.(*gen.StartContext))
+ return p.VisitStart(tree)
case *gen.ExprContext:
- return p.VisitExpr(tree.(*gen.ExprContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitExpr(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.ConditionalAndContext:
- return p.VisitConditionalAnd(tree.(*gen.ConditionalAndContext))
+ return p.VisitConditionalAnd(tree)
case *gen.ConditionalOrContext:
- return p.VisitConditionalOr(tree.(*gen.ConditionalOrContext))
+ return p.VisitConditionalOr(tree)
case *gen.RelationContext:
- return p.VisitRelation(tree.(*gen.RelationContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitRelation(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.CalcContext:
- return p.VisitCalc(tree.(*gen.CalcContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitCalc(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.LogicalNotContext:
- return p.VisitLogicalNot(tree.(*gen.LogicalNotContext))
- case *gen.MemberExprContext:
- return p.VisitMemberExpr(tree.(*gen.MemberExprContext))
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(tree.(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(tree.(*gen.SelectOrCallContext))
+ return p.VisitLogicalNot(tree)
+ case *gen.IdentOrGlobalCallContext:
+ return p.VisitIdentOrGlobalCall(tree)
+ case *gen.SelectContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitSelect(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MemberCallContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitMemberCall(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.MapInitializerListContext:
- return p.VisitMapInitializerList(tree.(*gen.MapInitializerListContext))
+ return p.VisitMapInitializerList(tree)
case *gen.NegateContext:
- return p.VisitNegate(tree.(*gen.NegateContext))
+ return p.VisitNegate(tree)
case *gen.IndexContext:
- return p.VisitIndex(tree.(*gen.IndexContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitIndex(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.UnaryContext:
- return p.VisitUnary(tree.(*gen.UnaryContext))
+ return p.VisitUnary(tree)
case *gen.CreateListContext:
- return p.VisitCreateList(tree.(*gen.CreateListContext))
+ return p.VisitCreateList(tree)
case *gen.CreateMessageContext:
- return p.VisitCreateMessage(tree.(*gen.CreateMessageContext))
+ return p.VisitCreateMessage(tree)
case *gen.CreateStructContext:
- return p.VisitCreateStruct(tree.(*gen.CreateStructContext))
+ return p.VisitCreateStruct(tree)
+ case *gen.IntContext:
+ return p.VisitInt(tree)
+ case *gen.UintContext:
+ return p.VisitUint(tree)
+ case *gen.DoubleContext:
+ return p.VisitDouble(tree)
+ case *gen.StringContext:
+ return p.VisitString(tree)
+ case *gen.BytesContext:
+ return p.VisitBytes(tree)
+ case *gen.BoolFalseContext:
+ return p.VisitBoolFalse(tree)
+ case *gen.BoolTrueContext:
+ return p.VisitBoolTrue(tree)
+ case *gen.NullContext:
+ return p.VisitNull(tree)
}
// Report at least one error if the parser reaches an unknown parse element.
// Typically, this happens if the parser has already encountered a syntax error elsewhere.
if len(p.errors.GetErrors()) == 0 {
txt := "<>"
- if tree != nil {
- txt = fmt.Sprintf("<<%T>>", tree)
+ if t != nil {
+ txt = fmt.Sprintf("<<%T>>", t)
}
return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
}
@@ -411,12 +443,12 @@ func (p *parser) Visit(tree antlr.ParseTree) interface{} {
}
// Visit a parse tree produced by CELParser#start.
-func (p *parser) VisitStart(ctx *gen.StartContext) interface{} {
+func (p *parser) VisitStart(ctx *gen.StartContext) any {
return p.Visit(ctx.Expr())
}
// Visit a parse tree produced by CELParser#expr.
-func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
+func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
if ctx.GetOp() == nil {
return result
@@ -428,11 +460,8 @@ func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
}
// Visit a parse tree produced by CELParser#conditionalOr.
-func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
b := newBalancer(p.helper, operators.LogicalOr, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
@@ -447,11 +476,8 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
}
// Visit a parse tree produced by CELParser#conditionalAnd.
-func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{} {
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
b := newBalancer(p.helper, operators.LogicalAnd, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
@@ -466,10 +492,7 @@ func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{}
}
// Visit a parse tree produced by CELParser#relation.
-func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
- if ctx.Calc() != nil {
- return p.Visit(ctx.Calc())
- }
+func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
opText := ""
if ctx.GetOp() != nil {
opText = ctx.GetOp().GetText()
@@ -484,10 +507,7 @@ func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
}
// Visit a parse tree produced by CELParser#calc.
-func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
- if ctx.Unary() != nil {
- return p.Visit(ctx.Unary())
- }
+func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
opText := ""
if ctx.GetOp() != nil {
opText = ctx.GetOp().GetText()
@@ -501,27 +521,12 @@ func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
return p.reportError(ctx, "operator not found")
}
-func (p *parser) VisitUnary(ctx *gen.UnaryContext) interface{} {
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) any {
return p.helper.newLiteralString(ctx, "<<error>>")
}
-// Visit a parse tree produced by CELParser#MemberExpr.
-func (p *parser) VisitMemberExpr(ctx *gen.MemberExprContext) interface{} {
- switch ctx.Member().(type) {
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(ctx.Member().(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(ctx.Member().(*gen.SelectOrCallContext))
- case *gen.IndexContext:
- return p.VisitIndex(ctx.Member().(*gen.IndexContext))
- case *gen.CreateMessageContext:
- return p.VisitCreateMessage(ctx.Member().(*gen.CreateMessageContext))
- }
- return p.reportError(ctx, "unsupported simple expression")
-}
-
// Visit a parse tree produced by CELParser#LogicalNot.
-func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
if len(ctx.GetOps())%2 == 0 {
return p.Visit(ctx.Member())
}
@@ -530,7 +535,7 @@ func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
return p.globalCallOrMacro(opID, operators.LogicalNot, target)
}
-func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
+func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
if len(ctx.GetOps())%2 == 0 {
return p.Visit(ctx.Member())
}
@@ -539,60 +544,77 @@ func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
return p.globalCallOrMacro(opID, operators.Negate, target)
}
-// Visit a parse tree produced by CELParser#SelectOrCall.
-func (p *parser) VisitSelectOrCall(ctx *gen.SelectOrCallContext) interface{} {
+// VisitSelect visits a parse tree produced by CELParser#Select.
+func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
operand := p.Visit(ctx.Member()).(*exprpb.Expr)
// Handle the error case where no valid identifier is specified.
- if ctx.GetId() == nil {
+ if ctx.GetId() == nil || ctx.GetOp() == nil {
return p.helper.newExpr(ctx)
}
id := ctx.GetId().GetText()
- if ctx.GetOpen() != nil {
- opID := p.helper.id(ctx.GetOpen())
- return p.receiverCallOrMacro(opID, id, operand, p.visitList(ctx.GetArgs())...)
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '.?'")
+ }
+ return p.helper.newGlobalCall(
+ ctx.GetOp(),
+ operators.OptSelect,
+ operand,
+ p.helper.newLiteralString(ctx.GetId(), id))
}
return p.helper.newSelect(ctx.GetOp(), operand, id)
}
-// Visit a parse tree produced by CELParser#PrimaryExpr.
-func (p *parser) VisitPrimaryExpr(ctx *gen.PrimaryExprContext) interface{} {
- switch ctx.Primary().(type) {
- case *gen.NestedContext:
- return p.VisitNested(ctx.Primary().(*gen.NestedContext))
- case *gen.IdentOrGlobalCallContext:
- return p.VisitIdentOrGlobalCall(ctx.Primary().(*gen.IdentOrGlobalCallContext))
- case *gen.CreateListContext:
- return p.VisitCreateList(ctx.Primary().(*gen.CreateListContext))
- case *gen.CreateStructContext:
- return p.VisitCreateStruct(ctx.Primary().(*gen.CreateStructContext))
- case *gen.ConstantLiteralContext:
- return p.VisitConstantLiteral(ctx.Primary().(*gen.ConstantLiteralContext))
+// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
+func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
}
-
- return p.reportError(ctx, "invalid primary expression")
+ id := ctx.GetId().GetText()
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
}
// Visit a parse tree produced by CELParser#Index.
-func (p *parser) VisitIndex(ctx *gen.IndexContext) interface{} {
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
opID := p.helper.id(ctx.GetOp())
index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.Index, target, index)
+ operator := operators.Index
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+ }
+ operator = operators.OptIndex
+ }
+ return p.globalCallOrMacro(opID, operator, target, index)
}
// Visit a parse tree produced by CELParser#CreateMessage.
-func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) interface{} {
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- objID := p.helper.id(ctx.GetOp())
- if messageName, found := p.extractQualifiedName(target); found {
- entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
- return p.helper.newObject(objID, messageName, entries...)
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+ messageName := ""
+ for _, id := range ctx.GetIds() {
+ if len(messageName) != 0 {
+ messageName += "."
+ }
+ messageName += id.GetText()
+ }
+ if ctx.GetLeadingDot() != nil {
+ messageName = "." + messageName
}
- return p.helper.newExpr(objID)
+ objID := p.helper.id(ctx.GetOp())
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ return p.helper.newObject(objID, messageName, entries...)
}
// Visit a parse tree of field initializers.
-func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) interface{} {
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
if ctx == nil || ctx.GetFields() == nil {
// This is the result of a syntax error handled elswhere, return empty.
return []*exprpb.Expr_CreateStruct_Entry{}
@@ -607,15 +629,27 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext
return []*exprpb.Expr_CreateStruct_Entry{}
}
initID := p.helper.id(cols[i])
+ optField := f.(*gen.OptFieldContext)
+ optional := optField.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optField, "unsupported syntax '?'")
+ continue
+ }
+ // The field may be empty due to a prior error.
+ id := optField.IDENTIFIER()
+ if id == nil {
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+ fieldName := id.GetText()
value := p.Visit(vals[i]).(*exprpb.Expr)
- field := p.helper.newObjectField(initID, f.GetText(), value)
+ field := p.helper.newObjectField(initID, fieldName, value, optional)
result[i] = field
}
return result
}
// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
-func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) interface{} {
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
identName := ""
if ctx.GetLeadingDot() != nil {
identName = "."
@@ -632,24 +666,20 @@ func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) inter
identName += id
if ctx.GetOp() != nil {
opID := p.helper.id(ctx.GetOp())
- return p.globalCallOrMacro(opID, identName, p.visitList(ctx.GetArgs())...)
+ return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
}
return p.helper.newIdent(ctx.GetId(), identName)
}
-// Visit a parse tree produced by CELParser#Nested.
-func (p *parser) VisitNested(ctx *gen.NestedContext) interface{} {
- return p.Visit(ctx.GetE())
-}
-
// Visit a parse tree produced by CELParser#CreateList.
-func (p *parser) VisitCreateList(ctx *gen.CreateListContext) interface{} {
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
listID := p.helper.id(ctx.GetOp())
- return p.helper.newList(listID, p.visitList(ctx.GetElems())...)
+ elems, optionals := p.visitListInit(ctx.GetElems())
+ return p.helper.newList(listID, elems, optionals...)
}
// Visit a parse tree produced by CELParser#CreateStruct.
-func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
structID := p.helper.id(ctx.GetOp())
entries := []*exprpb.Expr_CreateStruct_Entry{}
if ctx.GetEntries() != nil {
@@ -658,31 +688,8 @@ func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
return p.helper.newMap(structID, entries...)
}
-// Visit a parse tree produced by CELParser#ConstantLiteral.
-func (p *parser) VisitConstantLiteral(ctx *gen.ConstantLiteralContext) interface{} {
- switch ctx.Literal().(type) {
- case *gen.IntContext:
- return p.VisitInt(ctx.Literal().(*gen.IntContext))
- case *gen.UintContext:
- return p.VisitUint(ctx.Literal().(*gen.UintContext))
- case *gen.DoubleContext:
- return p.VisitDouble(ctx.Literal().(*gen.DoubleContext))
- case *gen.StringContext:
- return p.VisitString(ctx.Literal().(*gen.StringContext))
- case *gen.BytesContext:
- return p.VisitBytes(ctx.Literal().(*gen.BytesContext))
- case *gen.BoolFalseContext:
- return p.VisitBoolFalse(ctx.Literal().(*gen.BoolFalseContext))
- case *gen.BoolTrueContext:
- return p.VisitBoolTrue(ctx.Literal().(*gen.BoolTrueContext))
- case *gen.NullContext:
- return p.VisitNull(ctx.Literal().(*gen.NullContext))
- }
- return p.reportError(ctx, "invalid literal")
-}
-
// Visit a parse tree produced by CELParser#mapInitializerList.
-func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) interface{} {
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
if ctx == nil || ctx.GetKeys() == nil {
// This is the result of a syntax error handled elswhere, return empty.
return []*exprpb.Expr_CreateStruct_Entry{}
@@ -697,16 +704,22 @@ func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) int
// This is the result of a syntax error detected elsewhere.
return []*exprpb.Expr_CreateStruct_Entry{}
}
- key := p.Visit(keys[i]).(*exprpb.Expr)
+ optKey := keys[i]
+ optional := optKey.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optKey, "unsupported syntax '?'")
+ continue
+ }
+ key := p.Visit(optKey.GetE()).(*exprpb.Expr)
value := p.Visit(vals[i]).(*exprpb.Expr)
- entry := p.helper.newMapEntry(colID, key, value)
+ entry := p.helper.newMapEntry(colID, key, value, optional)
result[i] = entry
}
return result
}
// Visit a parse tree produced by CELParser#Int.
-func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
text := ctx.GetTok().GetText()
base := 10
if strings.HasPrefix(text, "0x") {
@@ -724,7 +737,7 @@ func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
}
// Visit a parse tree produced by CELParser#Uint.
-func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
text := ctx.GetTok().GetText()
// trim the 'u' designator included in the uint literal.
text = text[:len(text)-1]
@@ -741,7 +754,7 @@ func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
}
// Visit a parse tree produced by CELParser#Double.
-func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
txt := ctx.GetTok().GetText()
if ctx.GetSign() != nil {
txt = ctx.GetSign().GetText() + txt
@@ -755,42 +768,66 @@ func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
}
// Visit a parse tree produced by CELParser#String.
-func (p *parser) VisitString(ctx *gen.StringContext) interface{} {
+func (p *parser) VisitString(ctx *gen.StringContext) any {
s := p.unquote(ctx, ctx.GetText(), false)
return p.helper.newLiteralString(ctx, s)
}
// Visit a parse tree produced by CELParser#Bytes.
-func (p *parser) VisitBytes(ctx *gen.BytesContext) interface{} {
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
return p.helper.newLiteralBytes(ctx, b)
}
// Visit a parse tree produced by CELParser#BoolTrue.
-func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) interface{} {
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
return p.helper.newLiteralBool(ctx, true)
}
// Visit a parse tree produced by CELParser#BoolFalse.
-func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) interface{} {
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
return p.helper.newLiteralBool(ctx, false)
}
// Visit a parse tree produced by CELParser#Null.
-func (p *parser) VisitNull(ctx *gen.NullContext) interface{} {
+func (p *parser) VisitNull(ctx *gen.NullContext) any {
return p.helper.newLiteral(ctx,
&exprpb.Constant{
ConstantKind: &exprpb.Constant_NullValue{
NullValue: structpb.NullValue_NULL_VALUE}})
}
-func (p *parser) visitList(ctx gen.IExprListContext) []*exprpb.Expr {
+func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr {
if ctx == nil {
return []*exprpb.Expr{}
}
return p.visitSlice(ctx.GetE())
}
+func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) {
+ if ctx == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ elements := ctx.GetElems()
+ result := make([]*exprpb.Expr, len(elements))
+ optionals := []int32{}
+ for i, e := range elements {
+ ex := p.Visit(e.GetE()).(*exprpb.Expr)
+ if ex == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ result[i] = ex
+ if e.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ p.reportError(e.GetOpt(), "unsupported syntax '?'")
+ continue
+ }
+ optionals = append(optionals, int32(i))
+ }
+ }
+ return result, optionals
+}
+
func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
if expressions == nil {
return []*exprpb.Expr{}
@@ -803,26 +840,7 @@ func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
return result
}
-func (p *parser) extractQualifiedName(e *exprpb.Expr) (string, bool) {
- if e == nil {
- return "", false
- }
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- return e.GetIdentExpr().GetName(), true
- case *exprpb.Expr_SelectExpr:
- s := e.GetSelectExpr()
- if prefix, found := p.extractQualifiedName(s.GetOperand()); found {
- return prefix + "." + s.GetField(), true
- }
- }
- // TODO: Add a method to Source to get location from character offset.
- location := p.helper.getLocation(e.GetId())
- p.reportError(location, "expected a qualified name")
- return "", false
-}
-
-func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string {
+func (p *parser) unquote(ctx any, value string, isBytes bool) string {
text, err := unescape(value, isBytes)
if err != nil {
p.reportError(ctx, "%s", err.Error())
@@ -831,7 +849,7 @@ func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string {
return text
}
-func (p *parser) reportError(ctx interface{}, format string, args ...interface{}) *exprpb.Expr {
+func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr {
var location common.Location
switch ctx.(type) {
case common.Location:
@@ -847,9 +865,15 @@ func (p *parser) reportError(ctx interface{}, format string, args ...interface{}
}
// ANTLR Parse listener implementations
-func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
+func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
// TODO: Snippet
l := p.helper.source.NewLocation(line, column)
+ // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word
+ // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error
+ // messages out of ANTLR to prevent future breaking changes related to error message content.
+ if strings.Contains(msg, "no viable alternative") {
+ msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier)
+ }
p.errors.syntaxError(l, msg)
}
@@ -892,14 +916,95 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr,
eh.parserHelper = p.helper
eh.id = exprID
expr, err := macro.Expander()(eh, target, args)
+ // An error indicates that the macro was matched, but the arguments were not well-formed.
if err != nil {
if err.Location != nil {
return p.reportError(err.Location, err.Message), true
}
return p.reportError(p.helper.getLocation(exprID), err.Message), true
}
+ // A nil value from the macro indicates that the macro implementation decided that
+ // an expansion should not be performed.
+ if expr == nil {
+ return nil, false
+ }
if p.populateMacroCalls {
p.helper.addMacroCall(expr.GetId(), function, target, args...)
}
return expr, true
}
+
+func (p *parser) checkAndIncrementRecursionDepth() {
+ p.recursionDepth++
+ if p.recursionDepth > p.maxRecursionDepth {
+ panic(&recursionError{message: "max recursion depth exceeded"})
+ }
+}
+
+func (p *parser) decrementRecursionDepth() {
+ p.recursionDepth--
+}
+
+// unnest traverses down the left-hand side of the parse graph until it encounters the first compound
+// parse node or the first leaf in the parse graph.
+func unnest(tree antlr.ParseTree) antlr.ParseTree {
+ for tree != nil {
+ switch t := tree.(type) {
+ case *gen.ExprContext:
+ // conditionalOr op='?' conditionalOr : expr
+ if t.GetOp() != nil {
+ return t
+ }
+ // conditionalOr
+ tree = t.GetE()
+ case *gen.ConditionalOrContext:
+ // conditionalAnd (ops=|| conditionalAnd)*
+ if t.GetOps() != nil && len(t.GetOps()) > 0 {
+ return t
+ }
+ // conditionalAnd
+ tree = t.GetE()
+ case *gen.ConditionalAndContext:
+ // relation (ops=&& relation)*
+ if t.GetOps() != nil && len(t.GetOps()) > 0 {
+ return t
+ }
+ // relation
+ tree = t.GetE()
+ case *gen.RelationContext:
+ // relation op relation
+ if t.GetOp() != nil {
+ return t
+ }
+ // calc
+ tree = t.Calc()
+ case *gen.CalcContext:
+ // calc op calc
+ if t.GetOp() != nil {
+ return t
+ }
+ // unary
+ tree = t.Unary()
+ case *gen.MemberExprContext:
+ // member expands to one of: primary, select, index, or create message
+ tree = t.Member()
+ case *gen.PrimaryExprContext:
+ // primary expands to one of identifier, nested, create list, create struct, literal
+ tree = t.Primary()
+ case *gen.NestedContext:
+ // contains a nested 'expr'
+ tree = t.GetE()
+ case *gen.ConstantLiteralContext:
+ // expands to a primitive literal
+ tree = t.Literal()
+ default:
+ return t
+ }
+ }
+ return tree
+}
+
+var (
+ reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
+ mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER"
+)
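The reservedIdentifier rewrite added above can be illustrated with a standalone snippet; the input message below is a made-up example of the ANTLR "no viable alternative" error it targets, and the regex and replacement string are copied from the variables in this change.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	reserved := regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
	msg := "no viable alternative at input '.true'"
	// The reserved word is re-reported as a mismatched identifier.
	fmt.Println(reserved.ReplaceAllString(msg, "mismatched input '$1' expecting IDENTIFIER"))
	// Output: mismatched input 'true' expecting IDENTIFIER
}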
diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go
index a459bb4a98d0..5ff979aa6009 100644
--- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go
+++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go
@@ -106,9 +106,15 @@ func (un *unparser) visitCall(expr *exprpb.Expr) error {
// ternary operator
case operators.Conditional:
return un.visitCallConditional(expr)
+ // optional select operator
+ case operators.OptSelect:
+ return un.visitOptSelect(expr)
// index operator
case operators.Index:
return un.visitCallIndex(expr)
+ // optional index operator
+ case operators.OptIndex:
+ return un.visitCallOptIndex(expr)
// unary operators
case operators.LogicalNot, operators.Negate:
return un.visitCallUnary(expr)
@@ -218,6 +224,14 @@ func (un *unparser) visitCallFunc(expr *exprpb.Expr) error {
}
func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[")
+}
+
+func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[?")
+}
+
+func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error {
c := expr.GetCallExpr()
args := c.GetArgs()
nested := isBinaryOrTernaryOperator(args[0])
@@ -225,7 +239,7 @@ func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
if err != nil {
return err
}
- un.str.WriteString("[")
+ un.str.WriteString(op)
err = un.visit(args[1])
if err != nil {
return err
@@ -289,8 +303,15 @@ func (un *unparser) visitIdent(expr *exprpb.Expr) error {
func (un *unparser) visitList(expr *exprpb.Expr) error {
l := expr.GetListExpr()
elems := l.GetElements()
+ optIndices := make(map[int]bool, len(elems))
+ for _, idx := range l.GetOptionalIndices() {
+ optIndices[int(idx)] = true
+ }
un.str.WriteString("[")
for i, elem := range elems {
+ if optIndices[i] {
+ un.str.WriteString("?")
+ }
err := un.visit(elem)
if err != nil {
return err
@@ -303,20 +324,32 @@ func (un *unparser) visitList(expr *exprpb.Expr) error {
return nil
}
+func (un *unparser) visitOptSelect(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
+ operand := args[0]
+ field := args[1].GetConstExpr().GetStringValue()
+ return un.visitSelectInternal(operand, false, ".?", field)
+}
+
func (un *unparser) visitSelect(expr *exprpb.Expr) error {
sel := expr.GetSelectExpr()
+ return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField())
+}
+
+func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error {
// handle the case when the select expression was generated by the has() macro.
- if sel.GetTestOnly() {
+ if testOnly {
un.str.WriteString("has(")
}
- nested := !sel.GetTestOnly() && isBinaryOrTernaryOperator(sel.GetOperand())
- err := un.visitMaybeNested(sel.GetOperand(), nested)
+ nested := !testOnly && isBinaryOrTernaryOperator(operand)
+ err := un.visitMaybeNested(operand, nested)
if err != nil {
return err
}
- un.str.WriteString(".")
- un.str.WriteString(sel.GetField())
- if sel.GetTestOnly() {
+ un.str.WriteString(op)
+ un.str.WriteString(field)
+ if testOnly {
un.str.WriteString(")")
}
return nil
@@ -339,6 +372,9 @@ func (un *unparser) visitStructMsg(expr *exprpb.Expr) error {
un.str.WriteString("{")
for i, entry := range entries {
f := entry.GetFieldKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
un.str.WriteString(f)
un.str.WriteString(": ")
v := entry.GetValue()
@@ -360,6 +396,9 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error {
un.str.WriteString("{")
for i, entry := range entries {
k := entry.GetMapKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
err := un.visit(k)
if err != nil {
return err
@@ -492,11 +531,10 @@ func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool
un.str.WriteString(" ")
}
return true
- } else {
- un.str.WriteString(" ")
- un.str.WriteString(unmangled)
- un.str.WriteString(" ")
}
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
return false
}
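As a rough summary of the unparser additions above, the sketch below lists, as CEL source text, the forms that OptSelect, OptIndex, and the optional list/struct/map entries render to. The concrete expressions and identifiers are illustrative only, not taken from this change.
package example

// optionalSyntaxExamples shows how the unparser renders optional-syntax nodes.
var optionalSyntaxExamples = []string{
	"msg.?field",     // operators.OptSelect unparses with ".?"
	"m[?'key']",      // operators.OptIndex unparses with "[?"
	"[?x, y]",        // an optional list element gets a "?" prefix
	"Msg{?field: x}", // an optional struct entry gets a "?" prefix
	"{?'k': v}",      // an optional map entry gets a "?" prefix
}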
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go
index 028a760a91b5..8677ed49a0ea 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go
@@ -46,8 +46,23 @@ func (schema *Schema) describeSchema(indent string) string {
if schema.Schema != nil {
result += indent + "$schema: " + *(schema.Schema) + "\n"
}
+ if schema.ReadOnly != nil && *schema.ReadOnly {
+ result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly))
+ }
+ if schema.WriteOnly != nil && *schema.WriteOnly {
+ result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly))
+ }
if schema.ID != nil {
- result += indent + "id: " + *(schema.ID) + "\n"
+ switch strings.TrimSuffix(*schema.Schema, "#") {
+ case "http://json-schema.org/draft-04/schema#":
+ fallthrough
+ case "#":
+ fallthrough
+ case "":
+ result += indent + "id: " + *(schema.ID) + "\n"
+ default:
+ result += indent + "$id: " + *(schema.ID) + "\n"
+ }
}
if schema.MultipleOf != nil {
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go
index 4781bdc5f500..0d877249abb1 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go
@@ -23,9 +23,11 @@ import "gopkg.in/yaml.v3"
// All fields are pointers and are nil if the associated values
// are not specified.
type Schema struct {
- Schema *string // $schema
- ID *string // id keyword used for $ref resolution scope
- Ref *string // $ref, i.e. JSON Pointers
+ Schema *string // $schema
+ ID *string // id keyword used for $ref resolution scope
+ Ref *string // $ref, i.e. JSON Pointers
+ ReadOnly *bool
+ WriteOnly *bool
// http://json-schema.org/latest/json-schema-validation.html
// 5.1. Validation keywords for numeric instances (number and integer)
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go
index b8583d466023..a909a34128b7 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go
@@ -165,7 +165,6 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
default:
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
- return nil
}
return nil
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go
index 340dc5f93306..15b1f905067a 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go
@@ -16,6 +16,7 @@ package jsonschema
import (
"fmt"
+ "strings"
"gopkg.in/yaml.v3"
)
@@ -33,7 +34,11 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) {
value := node.Content[i+1]
switch value.Kind {
case yaml.ScalarNode:
- result += "\"" + value.Value + "\""
+ if value.Tag == "!!bool" {
+ result += value.Value
+ } else {
+ result += "\"" + value.Value + "\""
+ }
case yaml.MappingNode:
result += renderMappingNode(value, innerIndent)
case yaml.SequenceNode:
@@ -58,7 +63,11 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) {
item := node.Content[i]
switch item.Kind {
case yaml.ScalarNode:
- result += innerIndent + "\"" + item.Value + "\""
+ if item.Tag == "!!bool" {
+ result += innerIndent + item.Value
+ } else {
+ result += innerIndent + "\"" + item.Value + "\""
+ }
case yaml.MappingNode:
result += innerIndent + renderMappingNode(item, innerIndent) + ""
default:
@@ -260,11 +269,26 @@ func (schema *Schema) nodeValue() *yaml.Node {
content = appendPair(content, "title", nodeForString(*schema.Title))
}
if schema.ID != nil {
- content = appendPair(content, "id", nodeForString(*schema.ID))
+ switch strings.TrimSuffix(*schema.Schema, "#") {
+ case "http://json-schema.org/draft-04/schema":
+ fallthrough
+ case "#":
+ fallthrough
+ case "":
+ content = appendPair(content, "id", nodeForString(*schema.ID))
+ default:
+ content = appendPair(content, "$id", nodeForString(*schema.ID))
+ }
}
if schema.Schema != nil {
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
}
+ if schema.ReadOnly != nil && *schema.ReadOnly {
+ content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly))
+ }
+ if schema.WriteOnly != nil && *schema.WriteOnly {
+ content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly))
+ }
if schema.Type != nil {
content = appendPair(content, "type", schema.Type.nodeValue())
}
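A small sketch, not part of this change, of why the "!!bool" check in the renderer above matters: yaml.v3 tags boolean scalars with "!!bool", and those values should be emitted unquoted (for example "readOnly: true" rather than "readOnly: \"true\""). The node values here are illustrative.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	boolNode := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!bool", Value: "true"}
	strNode := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: "true"}
	// Only the !!bool-tagged scalar takes the unquoted rendering path.
	fmt.Println(boolNode.Tag == "!!bool", strNode.Tag == "!!bool") // true false
}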
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
index 0f17907667bd..28c2777d5115 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
@@ -7887,7 +7887,12 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
- // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+ if m.AdditionalProperties != nil {
+ for _, item := range m.AdditionalProperties {
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
+ }
+ }
return info
}
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
index 5f4a7025ea9a..d54a84db7c0e 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
@@ -8560,7 +8560,12 @@ func (m *Strings) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
- // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+ if m.AdditionalProperties != nil {
+ for _, item := range m.AdditionalProperties {
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
+ info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
+ }
+ }
return info
}
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
index 499e7f932d79..90a56f5526b2 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.18.1
+// protoc-gen-go v1.28.0
+// protoc v3.19.4
// source: openapiv3/OpenAPIv3.proto
package openapi_v3
@@ -6760,12 +6760,13 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{
0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61,
0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56,
0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70,
- 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
+ 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
index 1be335b89ba0..7aede5ed9091 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
@@ -42,7 +42,7 @@ option java_package = "org.openapi_v3";
option objc_class_prefix = "OAS";
// The Go package name.
-option go_package = "./openapiv3;openapi_v3";
+option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
message AdditionalPropertiesItem {
oneof oneof {
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md
index 5ee12d92e24e..83603b82aab7 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md
@@ -19,3 +19,7 @@ for OpenAPI.
The schema-generator directory contains support code which generates
openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown).
+
+### How to rebuild
+
+`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto`
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go
new file mode 100644
index 000000000000..ae242f304315
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go
@@ -0,0 +1,183 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.19.4
+// source: openapiv3/annotations.proto
+
+package openapi_v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Document)(nil),
+ Field: 1143,
+ Name: "openapi.v3.document",
+ Tag: "bytes,1143,opt,name=document",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1143,
+ Name: "openapi.v3.operation",
+ Tag: "bytes,1143,opt,name=operation",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1143,
+ Name: "openapi.v3.schema",
+ Tag: "bytes,1143,opt,name=schema",
+ Filename: "openapiv3/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1143,
+ Name: "openapi.v3.property",
+ Tag: "bytes,1143,opt,name=property",
+ Filename: "openapiv3/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional openapi.v3.Document document = 1143;
+ E_Document = &file_openapiv3_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // optional openapi.v3.Operation operation = 1143;
+ E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional openapi.v3.Schema schema = 1143;
+ E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional openapi.v3.Schema property = 1143;
+ E_Property = &file_openapiv3_annotations_proto_extTypes[3]
+)
+
+var File_openapiv3_annotations_proto protoreflect.FileDescriptor
+
+var file_openapiv3_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
+ 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
+ 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f,
+ 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76,
+ 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_openapiv3_annotations_proto_goTypes = []interface{}{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
+ (*Document)(nil), // 4: openapi.v3.Document
+ (*Operation)(nil), // 5: openapi.v3.Operation
+ (*Schema)(nil), // 6: openapi.v3.Schema
+}
+var file_openapiv3_annotations_proto_depIdxs = []int32{
+ 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
+ 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
+ 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
+ 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
+ 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
+ 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 4, // [4:8] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_openapiv3_annotations_proto_init() }
+func file_openapiv3_annotations_proto_init() {
+ if File_openapiv3_annotations_proto != nil {
+ return
+ }
+ file_openapiv3_OpenAPIv3_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_openapiv3_annotations_proto_goTypes,
+ DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
+ ExtensionInfos: file_openapiv3_annotations_proto_extTypes,
+ }.Build()
+ File_openapiv3_annotations_proto = out.File
+ file_openapiv3_annotations_proto_rawDesc = nil
+ file_openapiv3_annotations_proto_goTypes = nil
+ file_openapiv3_annotations_proto_depIdxs = nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto
new file mode 100644
index 000000000000..0bd87810db60
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto
@@ -0,0 +1,60 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package openapi.v3;
+
+import "openapiv3/OpenAPIv3.proto";
+import "google/protobuf/descriptor.proto";
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "AnnotationsProto";
+
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.openapi_v3";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+// The Go package name.
+option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
+
+extend google.protobuf.FileOptions {
+ Document document = 1143;
+}
+
+extend google.protobuf.MethodOptions {
+ Operation operation = 1143;
+}
+
+extend google.protobuf.MessageOptions {
+ Schema schema = 1143;
+}
+
+extend google.protobuf.FieldOptions {
+ Schema property = 1143;
+}
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/.travis.yml b/cluster-autoscaler/vendor/github.com/google/gofuzz/.travis.yml
index f8684d99fc4e..061d72ae079b 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/.travis.yml
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/.travis.yml
@@ -1,13 +1,10 @@
language: go
go:
- - 1.4
- - 1.3
- - 1.2
- - tip
-
-install:
- - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - master
script:
- go test -cover
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/cluster-autoscaler/vendor/github.com/google/gofuzz/CONTRIBUTING.md
index 51cf5cd1adae..97c1b34fd5e6 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/CONTRIBUTING.md
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -1,7 +1,7 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
-a just a few small guidelines you need to follow.
+just a few small guidelines you need to follow.
## Contributor License Agreement ##
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md b/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
index 386c2a457a8b..b503aae7d713 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/README.md
@@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
See more examples in ```example_test.go```.
+You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
+go-fuzz provides the user a byte-slice, which should be converted to different inputs
+for the tested function. This library can help convert the byte slice. Consider for
+example a fuzz test for the function `mypackage.MyFunc` that takes an int argument:
+```go
+// +build gofuzz
+package mypackage
+
+import fuzz "github.com/google/gofuzz"
+
+func Fuzz(data []byte) int {
+ var i int
+ fuzz.NewFromGoFuzz(data).Fuzz(&i)
+ MyFunc(i)
+ return 0
+}
+```
+
Happy testing!
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/cluster-autoscaler/vendor/github.com/google/gofuzz/bytesource/bytesource.go
new file mode 100644
index 000000000000..5bb365949691
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/bytesource/bytesource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
+package bytesource
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "math/rand"
+)
+
+// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
+// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a
+// fallback pseudo random source is created in case more random numbers are required.
+// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
+type ByteSource struct {
+ *bytes.Reader
+ fallback rand.Source
+}
+
+// New returns a new ByteSource from a given slice of bytes.
+func New(input []byte) *ByteSource {
+ s := &ByteSource{
+ Reader: bytes.NewReader(input),
+ fallback: rand.NewSource(0),
+ }
+ if len(input) > 0 {
+ s.fallback = rand.NewSource(int64(s.consumeUint64()))
+ }
+ return s
+}
+
+func (s *ByteSource) Uint64() uint64 {
+ // Return from input if it was not exhausted.
+ if s.Len() > 0 {
+ return s.consumeUint64()
+ }
+
+ // Input was exhausted, return random number from fallback (in this case fallback should not be
+ // nil). Try first having a Uint64 output (Should work in current rand implementation),
+ // otherwise return a conversion of Int63.
+ if s64, ok := s.fallback.(rand.Source64); ok {
+ return s64.Uint64()
+ }
+ return uint64(s.fallback.Int63())
+}
+
+func (s *ByteSource) Int63() int64 {
+ return int64(s.Uint64() >> 1)
+}
+
+func (s *ByteSource) Seed(seed int64) {
+ s.fallback = rand.NewSource(seed)
+ s.Reader = bytes.NewReader(nil)
+}
+
+// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the
+// bytes reader is not empty.
+func (s *ByteSource) consumeUint64() uint64 {
+ var bytes [8]byte
+ _, err := s.Read(bytes[:])
+ if err != nil && err != io.EOF {
+ panic("failed reading source") // Should not happen.
+ }
+ return binary.BigEndian.Uint64(bytes[:])
+}
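
For context, a minimal sketch (not part of this diff) of how the new bytesource package might be plugged into math/rand; the input bytes below are arbitrary illustrative values:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	// The first 8 input bytes seed the fallback source; remaining bytes feed
	// deterministic values until exhausted, after which the fallback takes over.
	src := bytesource.New([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
	r := rand.New(src) // *ByteSource satisfies rand.Source64
	fmt.Println(r.Uint64(), r.Uint64(), r.Uint64())
}
```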
diff --git a/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go b/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
index da0a5f93800d..761520a8ceeb 100644
--- a/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
+++ b/cluster-autoscaler/vendor/github.com/google/gofuzz/fuzz.go
@@ -22,6 +22,9 @@ import (
"reflect"
"regexp"
"time"
+
+ "github.com/google/gofuzz/bytesource"
+ "strings"
)
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
@@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer {
return f
}
+// NewFromGoFuzz is a helper function that enables using gofuzz (this
+// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
+// fuzzing. Essentially, it enables translating the fuzzing bytes from
+// go-fuzz to any Go object using this library.
+//
+// This implementation promises a constant translation from a given slice of
+// bytes to the fuzzed objects. This promise will remain over future
+// versions of Go and of this library.
+//
+// Note: the returned Fuzzer should not be shared between multiple goroutines,
+// as its deterministic output will no longer be available.
+//
+// Example: use go-fuzz to test the function `MyFunc(int)` in the package
+// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
+//
+// // +build gofuzz
+// package mypackage
+// import fuzz "github.com/google/gofuzz"
+// func Fuzz(data []byte) int {
+// var i int
+// fuzz.NewFromGoFuzz(data).Fuzz(&i)
+// MyFunc(i)
+// return 0
+// }
+func NewFromGoFuzz(data []byte) *Fuzzer {
+ return New().RandSource(bytesource.New(data))
+}
+
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
//
// Each entry in fuzzFuncs must be a function taking two parameters.
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int {
}
func (f *Fuzzer) genShouldFill() bool {
- return f.r.Float64() > f.nilChance
+ return f.r.Float64() >= f.nilChance
}
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
@@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
fn(v, fc.fuzzer.r)
return
}
+
switch v.Kind() {
case reflect.Map:
if fc.fuzzer.genShouldFill() {
@@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
v.SetFloat(r.Float64())
},
reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
- panic("unimplemented")
+ v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
},
reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
- panic("unimplemented")
+ v.SetComplex(complex(r.Float64(), r.Float64()))
},
reflect.String: func(v reflect.Value, r *rand.Rand) {
v.SetString(randString(r))
@@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
// randBool returns true or false randomly.
func randBool(r *rand.Rand) bool {
- if r.Int()&1 == 1 {
- return true
- }
- return false
+ return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+ Int63n(int64) int64
}
-type charRange struct {
- first, last rune
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than or equal to First.
+type UnicodeRange struct {
+ First, Last rune
}
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
// choose returns a random unicode character from the given range, using the
// given randomness source.
-func (r *charRange) choose(rand *rand.Rand) rune {
- count := int64(r.last - r.first)
- return r.first + rune(rand.Int63n(count))
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+ count := int64(ur.Last - ur.First + 1)
+ return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
+ ur.check()
+ return func(s *string, c Continue) {
+ *s = ur.randString(c.Rand)
+ }
}
-var unicodeRanges = []charRange{
+// check panics if the first rune of ur (UnicodeRange) is numerically
+// greater than the last one.
+func (ur UnicodeRange) check() {
+ if ur.Last < ur.First {
+ panic("The last encoding must be greater than the first one.")
+ }
+}
+
+// randString of UnicodeRange makes a random string up to 20 characters long.
+// Each character is selected from ur (UnicodeRange).
+func (ur UnicodeRange) randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ sb := strings.Builder{}
+ sb.Grow(n)
+ for i := 0; i < n; i++ {
+ sb.WriteRune(ur.choose(r))
+ }
+ return sb.String()
+}
+
+// defaultUnicodeRanges is the default set of unicode ranges used when the user
+// does not provide a CustomStringFuzzFunc() but still wants strings fuzzed.
+var defaultUnicodeRanges = UnicodeRanges{
{' ', '~'}, // ASCII characters
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from one of the ranges of ur(UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
+ // Check unicode ranges slice is empty.
+ if len(ur) == 0 {
+ panic("UnicodeRanges is empty.")
+ }
+ // if not empty, each range should be checked.
+ for i := range ur {
+ ur[i].check()
+ }
+ return func(s *string, c Continue) {
+ *s = ur.randString(c.Rand)
+ }
+}
+
+// randString of UnicodeRanges makes a random string up to 20 characters long.
+// Each character is selected from one of the ranges of ur (UnicodeRanges),
+// and each range has an equal probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ sb := strings.Builder{}
+ sb.Grow(n)
+ for i := 0; i < n; i++ {
+ sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+ }
+ return sb.String()
+}
+
// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string {
- n := r.Intn(20)
- runes := make([]rune, n)
- for i := range runes {
- runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
- }
- return string(runes)
+ return defaultUnicodeRanges.randString(r)
}
// randUint64 makes random 64 bit numbers.
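
For context, a minimal sketch (not part of this diff) of the new UnicodeRange.CustomStringFuzzFunc hook; the range below is an arbitrary illustrative choice:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	// Restrict fuzzed strings to lowercase ASCII letters.
	lower := fuzz.UnicodeRange{First: 'a', Last: 'z'}
	f := fuzz.New().Funcs(lower.CustomStringFuzzFunc())

	var s string
	f.Fuzz(&s) // s contains only runes in 'a'..'z' (possibly empty)
	fmt.Println(s)
}
```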
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
new file mode 100644
index 000000000000..b3283b815883
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
@@ -0,0 +1,185 @@
+// Copyright 2022 Google LLC.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner").
+//
+// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use.
+package client
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io"
+ "net/rpc"
+ "os"
+ "os/exec"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+const signAPI = "EnterpriseCertSigner.Sign"
+const certificateChainAPI = "EnterpriseCertSigner.CertificateChain"
+const publicKeyAPI = "EnterpriseCertSigner.Public"
+
+// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser.
+type Connection struct {
+ io.ReadCloser
+ io.WriteCloser
+}
+
+// Close closes c's underlying ReadCloser and WriteCloser.
+func (c *Connection) Close() error {
+ rerr := c.ReadCloser.Close()
+ werr := c.WriteCloser.Close()
+ if rerr != nil {
+ return rerr
+ }
+ return werr
+}
+
+func init() {
+ gob.Register(crypto.SHA256)
+ gob.Register(&rsa.PSSOptions{})
+}
+
+// SignArgs contains arguments to a crypto Signer.Sign method.
+type SignArgs struct {
+ Digest []byte // The content to sign.
+ Opts crypto.SignerOpts // Options for signing, such as Hash identifier.
+}
+
+// Key implements credential.Credential by holding the executed signer subprocess.
+type Key struct {
+ cmd *exec.Cmd // Pointer to the signer subprocess.
+ client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess.
+ publicKey crypto.PublicKey // Public key of loaded certificate.
+ chain [][]byte // Certificate chain of loaded certificate.
+}
+
+// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key.
+func (k *Key) CertificateChain() [][]byte {
+ return k.chain
+}
+
+// Close closes the RPC connection and kills the signer subprocess.
+// Call this to free up resources when the Key object is no longer needed.
+func (k *Key) Close() error {
+ if err := k.cmd.Process.Kill(); err != nil {
+ return fmt.Errorf("failed to kill signer process: %w", err)
+ }
+ // Wait for cmd to exit and release resources. Since the process is forcefully killed, this
+ // will return a non-nil error (varies by OS), which we will ignore.
+ _ = k.cmd.Wait()
+ // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
+ // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
+ if err := k.client.Close(); err.Error() != "close |0: file already closed" {
+ return fmt.Errorf("failed to close RPC connection: %w", err)
+ }
+ return nil
+}
+
+// Public returns the public key for this Key.
+func (k *Key) Public() crypto.PublicKey {
+ return k.publicKey
+}
+
+// Sign signs a message digest, using the specified signer options.
+func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
+ if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() {
+ return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size())
+ }
+ err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed)
+ return
+}
+
+// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable,
+// possibly due to missing config or missing binary path.
+var ErrCredUnavailable = errors.New("Cred is unavailable")
+
+// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate
+// related operations, including signing messages with the private key.
+//
+// The signer binary path is read from the specified configFilePath, if provided.
+// Otherwise, use the default config file path.
+//
+// The config file also specifies which certificate the signer should use.
+func Cred(configFilePath string) (*Key, error) {
+ if configFilePath == "" {
+ configFilePath = util.GetDefaultConfigFilePath()
+ }
+ enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
+ if err != nil {
+ if errors.Is(err, util.ErrConfigUnavailable) {
+ return nil, ErrCredUnavailable
+ }
+ return nil, err
+ }
+ k := &Key{
+ cmd: exec.Command(enterpriseCertSignerPath, configFilePath),
+ }
+
+ // Redirect errors from subprocess to parent process.
+ k.cmd.Stderr = os.Stderr
+
+ // RPC client will communicate with subprocess over stdin/stdout.
+ kin, err := k.cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ kout, err := k.cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ k.client = rpc.NewClient(&Connection{kout, kin})
+
+ if err := k.cmd.Start(); err != nil {
+ return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err)
+ }
+
+ if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil {
+ return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err)
+ }
+
+ var publicKeyBytes []byte
+ if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil {
+ return nil, fmt.Errorf("failed to retrieve public key: %w", err)
+ }
+
+ publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key: %w", err)
+ }
+
+ var ok bool
+ k.publicKey, ok = publicKey.(crypto.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid public key type: %T", publicKey)
+ }
+
+ switch pub := k.publicKey.(type) {
+ case *rsa.PublicKey:
+ if pub.Size() < 256 {
+ return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8)
+ }
+ case *ecdsa.PublicKey:
+ default:
+ return nil, fmt.Errorf("unsupported public key type: %v", pub)
+ }
+
+ return k, nil
+}
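
For context, a minimal sketch (not part of this diff) of using the new client package as a crypto.Signer; it assumes an ECP signer binary and certificate_config.json are already installed at the default gcloud location:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/sha256"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	// An empty path falls back to the default config file location.
	key, err := client.Cred("")
	if err != nil {
		log.Fatalf("loading enterprise certificate key: %v", err)
	}
	defer key.Close()

	digest := sha256.Sum256([]byte("payload"))
	sig, err := key.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		log.Fatalf("signing digest: %v", err)
	}
	log.Printf("chain has %d certs, signature is %d bytes", len(key.CertificateChain()), len(sig))
}
```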
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
new file mode 100644
index 000000000000..1640ec1c9e31
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
@@ -0,0 +1,91 @@
+// Copyright 2022 Google LLC.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util provides helper functions for the client.
+package util
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+)
+
+const configFileName = "certificate_config.json"
+
+// EnterpriseCertificateConfig contains parameters for initializing signer.
+type EnterpriseCertificateConfig struct {
+ Libs Libs `json:"libs"`
+}
+
+// Libs specifies the locations of helper libraries.
+type Libs struct {
+ ECP string `json:"ecp"`
+}
+
+// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable,
+// possibly due to entire config missing or missing binary path.
+var ErrConfigUnavailable = errors.New("Config is unavailable")
+
+// LoadSignerBinaryPath retrieves the path of the signer binary from the config file.
+func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
+ jsonFile, err := os.Open(configFilePath)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return "", ErrConfigUnavailable
+ }
+ return "", err
+ }
+
+ byteValue, err := io.ReadAll(jsonFile)
+ if err != nil {
+ return "", err
+ }
+ var config EnterpriseCertificateConfig
+ err = json.Unmarshal(byteValue, &config)
+ if err != nil {
+ return "", err
+ }
+ signerBinaryPath := config.Libs.ECP
+ if signerBinaryPath == "" {
+ return "", ErrConfigUnavailable
+ }
+ return signerBinaryPath, nil
+}
+
+func guessHomeDir() string {
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
+
+func getDefaultConfigFileDirectory() (directory string) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud")
+ }
+ return filepath.Join(guessHomeDir(), ".config/gcloud")
+}
+
+// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud.
+func GetDefaultConfigFilePath() (path string) {
+ return filepath.Join(getDefaultConfigFileDirectory(), configFileName)
+}
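
For context, a minimal sketch (not part of this diff) of how the new util helpers resolve the signer binary; the JSON shape shown in the comment mirrors the EnterpriseCertificateConfig struct above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client/util"
)

func main() {
	// The config file is expected to look like: {"libs": {"ecp": "/path/to/ecp"}}.
	path, err := util.LoadSignerBinaryPath(util.GetDefaultConfigFilePath())
	if err != nil {
		// util.ErrConfigUnavailable is returned when the file or the "ecp" entry is missing.
		log.Fatal(err)
	}
	fmt.Println("signer binary:", path)
}
```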
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
new file mode 100644
index 000000000000..10295639c5a0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ "v2": "2.7.1"
+}
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
new file mode 100644
index 000000000000..41a7ca94d4d8
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
@@ -0,0 +1,54 @@
+# Changelog
+
+## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06)
+
+
+### Bug Fixes
+
+* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254)
+
+## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02)
+
+
+### Features
+
+* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6))
+* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f))
+
+## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13)
+
+
+### Features
+
+* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3))
+
+## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04)
+
+
+### Bug Fixes
+
+* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23))
+
+## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04)
+
+
+### Features
+
+* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb))
+
+## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09)
+
+
+### Features
+
+* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c))
+
+
+### Bug Fixes
+
+* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e))
+
+
+### Miscellaneous Chores
+
+* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719))
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
index 869379da96f3..ed862c8b3982 100644
--- a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
@@ -28,18 +28,21 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package apierror implements a wrapper error for parsing error details from
-// API calls. Currently, only errors representing a gRPC status are supported.
+// API calls. Both HTTP & gRPC status errors are supported.
package apierror
import (
+ "errors"
"fmt"
"strings"
jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
)
// ErrDetails holds the google/rpc/error_details.proto messages.
@@ -59,6 +62,30 @@ type ErrDetails struct {
Unknown []interface{}
}
+// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages.
+var ErrMessageNotFound = errors.New("message not found")
+
+// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the
+// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type,
+// the content of the message is copied to the provided message.
+//
+// ExtractProtoMessage will return ErrMessageNotFound if there are no messages matching the
+// protocol buffer type of the provided message.
+func (e ErrDetails) ExtractProtoMessage(v proto.Message) error {
+ if v == nil {
+ return ErrMessageNotFound
+ }
+ for _, elem := range e.Unknown {
+ if elemProto, ok := elem.(proto.Message); ok {
+ if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() {
+ proto.Merge(v, elemProto)
+ return nil
+ }
+ }
+ }
+ return ErrMessageNotFound
+}
+
func (e ErrDetails) String() string {
var d strings.Builder
if e.ErrorInfo != nil {
@@ -171,12 +198,12 @@ func (a *APIError) Unwrap() error {
// Error returns a readable representation of the APIError.
func (a *APIError) Error() string {
var msg string
- if a.status != nil {
- msg = a.err.Error()
- } else if a.httpErr != nil {
+ if a.httpErr != nil {
// Truncate the googleapi.Error message because it dumps the Details in
// an ugly way.
msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
+ } else if a.status != nil {
+ msg = a.err.Error()
}
return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
}
@@ -207,29 +234,53 @@ func (a *APIError) Metadata() map[string]string {
}
-// FromError parses a Status error or a googleapi.Error and builds an APIError.
-func FromError(err error) (*APIError, bool) {
- if err == nil {
- return nil, false
- }
-
- ae := APIError{err: err}
+// setDetailsFromError parses a Status error or a googleapi.Error
+// and sets status and details or httpErr and details, respectively.
+// It returns false if neither Status nor googleapi.Error can be parsed.
+// When err is a googleapi.Error, the status of the returned error will
+// be set to an Unknown error, rather than nil, since a nil code is
+// interpreted as OK in the gRPC status package.
+func (a *APIError) setDetailsFromError(err error) bool {
st, isStatus := status.FromError(err)
- herr, isHTTPErr := err.(*googleapi.Error)
+ var herr *googleapi.Error
+ isHTTPErr := errors.As(err, &herr)
switch {
case isStatus:
- ae.status = st
- ae.details = parseDetails(st.Details())
+ a.status = st
+ a.details = parseDetails(st.Details())
case isHTTPErr:
- ae.httpErr = herr
- ae.details = parseHTTPDetails(herr)
+ a.httpErr = herr
+ a.details = parseHTTPDetails(herr)
+ a.status = status.New(codes.Unknown, herr.Message)
default:
- return nil, false
+ return false
}
+ return true
+}
- return &ae, true
+// FromError parses a Status error or a googleapi.Error and builds an
+// APIError, wrapping the provided error in the new APIError. It
+// returns false if neither Status nor googleapi.Error can be parsed.
+func FromError(err error) (*APIError, bool) {
+ return ParseError(err, true)
+}
+// ParseError parses a Status error or a googleapi.Error and builds an
+// APIError. If wrap is true, it wraps the error in the new APIError.
+// It returns false if neither Status nor googleapi.Error can be parsed.
+func ParseError(err error, wrap bool) (*APIError, bool) {
+ if err == nil {
+ return nil, false
+ }
+ ae := APIError{}
+ if wrap {
+ ae = APIError{err: err}
+ }
+ if !ae.setDetailsFromError(err) {
+ return nil, false
+ }
+ return &ae, true
}
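A minimal sketch of the difference between FromError and ParseError's wrap flag, exercised against a plain gRPC status error; the message text is illustrative only.

package main

import (
    "fmt"

    "github.com/googleapis/gax-go/v2/apierror"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    err := status.Error(codes.NotFound, "bucket not found")

    // FromError wraps err inside the returned *APIError.
    if ae, ok := apierror.FromError(err); ok {
        fmt.Println(ae.GRPCStatus().Code()) // NotFound
    }

    // ParseError with wrap=false builds an APIError that does not retain err.
    if ae, ok := apierror.ParseError(err, false); ok {
        fmt.Println(ae.Unwrap() == nil) // true
    }
}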
// parseDetails accepts a slice of interface{} that should be backed by some
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
new file mode 100644
index 000000000000..e4b03f161d82
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
@@ -0,0 +1,256 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.0
+// protoc v3.17.3
+// source: custom_error.proto
+
+package jsonerror
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Error code for `CustomError`.
+type CustomError_CustomErrorCode int32
+
+const (
+ // Default error.
+ CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0
+ // Too many foo.
+ CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1
+ // Not enough foo.
+ CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2
+ // Catastrophic error.
+ CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3
+)
+
+// Enum value maps for CustomError_CustomErrorCode.
+var (
+ CustomError_CustomErrorCode_name = map[int32]string{
+ 0: "CUSTOM_ERROR_CODE_UNSPECIFIED",
+ 1: "TOO_MANY_FOO",
+ 2: "NOT_ENOUGH_FOO",
+ 3: "UNIVERSE_WAS_DESTROYED",
+ }
+ CustomError_CustomErrorCode_value = map[string]int32{
+ "CUSTOM_ERROR_CODE_UNSPECIFIED": 0,
+ "TOO_MANY_FOO": 1,
+ "NOT_ENOUGH_FOO": 2,
+ "UNIVERSE_WAS_DESTROYED": 3,
+ }
+)
+
+func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode {
+ p := new(CustomError_CustomErrorCode)
+ *p = x
+ return p
+}
+
+func (x CustomError_CustomErrorCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_custom_error_proto_enumTypes[0].Descriptor()
+}
+
+func (CustomError_CustomErrorCode) Type() protoreflect.EnumType {
+ return &file_custom_error_proto_enumTypes[0]
+}
+
+func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead.
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) {
+ return file_custom_error_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// CustomError is an example of a custom error message which may be included
+// in an rpc status. It is not meant to reflect a standard error.
+type CustomError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Error code specific to the custom API being invoked.
+ Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"`
+ // Name of the failed entity.
+ Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"`
+ // Message that describes the error.
+ ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *CustomError) Reset() {
+ *x = CustomError{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_custom_error_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CustomError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomError) ProtoMessage() {}
+
+func (x *CustomError) ProtoReflect() protoreflect.Message {
+ mi := &file_custom_error_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomError.ProtoReflect.Descriptor instead.
+func (*CustomError) Descriptor() ([]byte, []int) {
+ return file_custom_error_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CustomError) GetCode() CustomError_CustomErrorCode {
+ if x != nil {
+ return x.Code
+ }
+ return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED
+}
+
+func (x *CustomError) GetEntity() string {
+ if x != nil {
+ return x.Entity
+ }
+ return ""
+}
+
+func (x *CustomError) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+var File_custom_error_proto protoreflect.FileDescriptor
+
+var file_custom_error_proto_rawDesc = []byte{
+ 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63,
+ 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75,
+ 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63,
+ 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43,
+ 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52,
+ 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41,
+ 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f,
+ 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16,
+ 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53,
+ 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_custom_error_proto_rawDescOnce sync.Once
+ file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc
+)
+
+func file_custom_error_proto_rawDescGZIP() []byte {
+ file_custom_error_proto_rawDescOnce.Do(func() {
+ file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData)
+ })
+ return file_custom_error_proto_rawDescData
+}
+
+var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_custom_error_proto_goTypes = []interface{}{
+ (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode
+ (*CustomError)(nil), // 1: error.CustomError
+}
+var file_custom_error_proto_depIdxs = []int32{
+ 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_custom_error_proto_init() }
+func file_custom_error_proto_init() {
+ if File_custom_error_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CustomError); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_custom_error_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_custom_error_proto_goTypes,
+ DependencyIndexes: file_custom_error_proto_depIdxs,
+ EnumInfos: file_custom_error_proto_enumTypes,
+ MessageInfos: file_custom_error_proto_msgTypes,
+ }.Build()
+ File_custom_error_proto = out.File
+ file_custom_error_proto_rawDesc = nil
+ file_custom_error_proto_goTypes = nil
+ file_custom_error_proto_depIdxs = nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
new file mode 100644
index 000000000000..21678ae65c99
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
@@ -0,0 +1,50 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package error;
+
+option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
+
+
+// CustomError is an example of a custom error message which may be included
+// in an rpc status. It is not meant to reflect a standard error.
+message CustomError {
+
+ // Error code for `CustomError`.
+ enum CustomErrorCode {
+ // Default error.
+ CUSTOM_ERROR_CODE_UNSPECIFIED = 0;
+
+ // Too many foo.
+ TOO_MANY_FOO = 1;
+
+ // Not enough foo.
+ NOT_ENOUGH_FOO = 2;
+
+ // Catastrophic error.
+ UNIVERSE_WAS_DESTROYED = 3;
+
+ }
+
+ // Error code specific to the custom API being invoked.
+ CustomErrorCode code = 1;
+
+ // Name of the failed entity.
+ string entity = 2;
+
+ // Message that describes the error.
+ string error_message = 3;
+}
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
index 27b34c06e281..7dd9b83739a9 100644
--- a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
@@ -14,9 +14,9 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
+// protoc-gen-go v1.28.0
// protoc v3.15.8
-// source: error.proto
+// source: apierror/internal/proto/error.proto
package jsonerror
@@ -55,7 +55,7 @@ type Error struct {
func (x *Error) Reset() {
*x = Error{}
if protoimpl.UnsafeEnabled {
- mi := &file_error_proto_msgTypes[0]
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -68,7 +68,7 @@ func (x *Error) String() string {
func (*Error) ProtoMessage() {}
func (x *Error) ProtoReflect() protoreflect.Message {
- mi := &file_error_proto_msgTypes[0]
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -81,7 +81,7 @@ func (x *Error) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error.ProtoReflect.Descriptor instead.
func (*Error) Descriptor() ([]byte, []int) {
- return file_error_proto_rawDescGZIP(), []int{0}
+ return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0}
}
func (x *Error) GetError() *Error_Status {
@@ -112,7 +112,7 @@ type Error_Status struct {
func (x *Error_Status) Reset() {
*x = Error_Status{}
if protoimpl.UnsafeEnabled {
- mi := &file_error_proto_msgTypes[1]
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -125,7 +125,7 @@ func (x *Error_Status) String() string {
func (*Error_Status) ProtoMessage() {}
func (x *Error_Status) ProtoReflect() protoreflect.Message {
- mi := &file_error_proto_msgTypes[1]
+ mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -138,7 +138,7 @@ func (x *Error_Status) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
func (*Error_Status) Descriptor() ([]byte, []int) {
- return file_error_proto_rawDescGZIP(), []int{0, 0}
+ return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0}
}
func (x *Error_Status) GetCode() int32 {
@@ -169,53 +169,55 @@ func (x *Error_Status) GetDetails() []*anypb.Any {
return nil
}
-var File_error_proto protoreflect.FileDescriptor
+var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor
-var file_error_proto_rawDesc = []byte{
- 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72,
- 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
- 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e,
- 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43,
- 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76,
- 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+var file_apierror_internal_proto_error_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5,
+ 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
+ 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
+ 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
- file_error_proto_rawDescOnce sync.Once
- file_error_proto_rawDescData = file_error_proto_rawDesc
+ file_apierror_internal_proto_error_proto_rawDescOnce sync.Once
+ file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc
)
-func file_error_proto_rawDescGZIP() []byte {
- file_error_proto_rawDescOnce.Do(func() {
- file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData)
+func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte {
+ file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() {
+ file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData)
})
- return file_error_proto_rawDescData
+ return file_apierror_internal_proto_error_proto_rawDescData
}
-var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_error_proto_goTypes = []interface{}{
+var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_apierror_internal_proto_error_proto_goTypes = []interface{}{
(*Error)(nil), // 0: error.Error
(*Error_Status)(nil), // 1: error.Error.Status
(code.Code)(0), // 2: google.rpc.Code
(*anypb.Any)(nil), // 3: google.protobuf.Any
}
-var file_error_proto_depIdxs = []int32{
+var file_apierror_internal_proto_error_proto_depIdxs = []int32{
1, // 0: error.Error.error:type_name -> error.Error.Status
2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
@@ -226,13 +228,13 @@ var file_error_proto_depIdxs = []int32{
0, // [0:3] is the sub-list for field type_name
}
-func init() { file_error_proto_init() }
-func file_error_proto_init() {
- if File_error_proto != nil {
+func init() { file_apierror_internal_proto_error_proto_init() }
+func file_apierror_internal_proto_error_proto_init() {
+ if File_apierror_internal_proto_error_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error); i {
case 0:
return &v.state
@@ -244,7 +246,7 @@ func file_error_proto_init() {
return nil
}
}
- file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error_Status); i {
case 0:
return &v.state
@@ -261,18 +263,18 @@ func file_error_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_error_proto_rawDesc,
+ RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
- GoTypes: file_error_proto_goTypes,
- DependencyIndexes: file_error_proto_depIdxs,
- MessageInfos: file_error_proto_msgTypes,
+ GoTypes: file_apierror_internal_proto_error_proto_goTypes,
+ DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs,
+ MessageInfos: file_apierror_internal_proto_error_proto_msgTypes,
}.Build()
- File_error_proto = out.File
- file_error_proto_rawDesc = nil
- file_error_proto_goTypes = nil
- file_error_proto_depIdxs = nil
+ File_apierror_internal_proto_error_proto = out.File
+ file_apierror_internal_proto_error_proto_rawDesc = nil
+ file_apierror_internal_proto_error_proto_goTypes = nil
+ file_apierror_internal_proto_error_proto_depIdxs = nil
}
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/call_option.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/call_option.go
index 425a7668d1ee..e092005563bf 100644
--- a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/call_option.go
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -30,9 +30,11 @@
package gax
import (
+ "errors"
"math/rand"
"time"
+ "google.golang.org/api/googleapi"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -119,6 +121,41 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) {
return 0, false
}
+// OnHTTPCodes returns a Retryer that retries if and only if
+// the previous attempt returns a googleapi.Error whose status code is listed in
+// cc. Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
+ codes := make(map[int]bool, len(cc))
+ for _, c := range cc {
+ codes[c] = true
+ }
+
+ return &httpRetryer{
+ backoff: bo,
+ codes: codes,
+ }
+}
+
+type httpRetryer struct {
+ backoff Backoff
+ codes map[int]bool
+}
+
+func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
+ var gerr *googleapi.Error
+ if !errors.As(err, &gerr) {
+ return 0, false
+ }
+
+ if r.codes[gerr.Code] {
+ return r.backoff.Pause(), true
+ }
+
+ return 0, false
+}
+
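A short usage sketch for OnHTTPCodes, assuming the import path github.com/googleapis/gax-go/v2 (package gax); the backoff parameters and status codes are illustrative.

package main

import (
    "fmt"
    "net/http"
    "time"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/api/googleapi"
)

func main() {
    // Retry only on 429 and 503; bo is copied into the returned retryer.
    r := gax.OnHTTPCodes(gax.Backoff{
        Initial:    200 * time.Millisecond,
        Max:        5 * time.Second,
        Multiplier: 2,
    }, http.StatusTooManyRequests, http.StatusServiceUnavailable)

    err := &googleapi.Error{Code: http.StatusServiceUnavailable, Message: "try again"}
    if pause, ok := r.Retry(err); ok {
        fmt.Println("retrying after", pause)
    }
}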
// Backoff implements exponential backoff. The wait time between retries is a
// random value between 0 and the "retry period" - the time between retries. The
// retry period starts at Initial and increases by the factor of Multiplier
@@ -173,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
+type pathOpt struct {
+ p string
+}
+
+func (p pathOpt) Resolve(s *CallSettings) {
+ s.Path = p.p
+}
+
+// WithPath applies a Path override to the HTTP-based APICall.
+//
+// This is for internal use only.
+func WithPath(p string) CallOption {
+ return &pathOpt{p: p}
+}
+
// WithGRPCOptions allows passing gRPC call options during client creation.
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
@@ -186,4 +238,7 @@ type CallSettings struct {
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
+
+ // Path is an HTTP override for an APICall.
+ Path string
}
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/content_type.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/content_type.go
new file mode 100644
index 000000000000..1b53d0a3ac1a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/content_type.go
@@ -0,0 +1,112 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+const sniffBuffSize = 512
+
+func newContentSniffer(r io.Reader) *contentSniffer {
+ return &contentSniffer{r: r}
+}
+
+// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
+type contentSniffer struct {
+ r io.Reader
+ start []byte // buffer for the sniffed bytes.
+ err error // set to any error encountered while reading bytes to be sniffed.
+
+ ctype string // set on first sniff.
+ sniffed bool // set to true on first sniff.
+}
+
+func (cs *contentSniffer) Read(p []byte) (n int, err error) {
+ // Ensure that the content type is sniffed before any data is consumed from Reader.
+ _, _ = cs.ContentType()
+
+ if len(cs.start) > 0 {
+ n := copy(p, cs.start)
+ cs.start = cs.start[n:]
+ return n, nil
+ }
+
+ // We may have read some bytes into start while sniffing, even if the read ended in an error.
+ // We should first return those bytes, then the error.
+ if cs.err != nil {
+ return 0, cs.err
+ }
+
+ // All bytes buffered while sniffing have been handled; delegate the rest to the underlying reader.
+ return cs.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (cs *contentSniffer) ContentType() (string, bool) {
+ if cs.sniffed {
+ return cs.ctype, cs.ctype != ""
+ }
+ cs.sniffed = true
+ // If ReadAll hits EOF, it returns err==nil.
+ cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
+
+ // Don't try to detect the content type based on possibly incomplete data.
+ if cs.err != nil {
+ return "", false
+ }
+
+ cs.ctype = http.DetectContentType(cs.start)
+ return cs.ctype, true
+}
+
+// DetermineContentType determines the content type of the supplied reader.
+// The content of media will be sniffed to determine the content type.
+// After calling DetermineContentType the caller must not perform further reads on
+// media, but rather read from the Reader that is returned.
+func DetermineContentType(media io.Reader) (io.Reader, string) {
+ // For backwards compatibility, allow clients to set content
+ // type by providing a ContentTyper for media.
+ // Note: This is an anonymous interface definition copied from googleapi.ContentTyper.
+ if typer, ok := media.(interface {
+ ContentType() string
+ }); ok {
+ return media, typer.ContentType()
+ }
+
+ sniffer := newContentSniffer(media)
+ if ctype, ok := sniffer.ContentType(); ok {
+ return sniffer, ctype
+ }
+ // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
+ return sniffer, ""
+}
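A small sketch of DetermineContentType with an in-memory reader; the detected type noted in the comment is what http.DetectContentType typically reports for short plain text and is an assumption, not a guarantee.

package main

import (
    "bytes"
    "fmt"
    "io"

    gax "github.com/googleapis/gax-go/v2"
)

func main() {
    body := bytes.NewReader([]byte(`{"name":"example"}`))

    // All further reads must go through the returned reader, which replays the sniffed bytes.
    r, ctype := gax.DetermineContentType(body)
    fmt.Println("detected:", ctype) // e.g. "text/plain; charset=utf-8"

    data, _ := io.ReadAll(r)
    fmt.Println(len(data), "bytes replayed")
}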
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/gax.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/gax.go
index f634b43727eb..36cdfa33e351 100644
--- a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/gax.go
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -35,5 +35,7 @@
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
package gax
+import "github.com/googleapis/gax-go/v2/internal"
+
// Version specifies the gax-go version being used.
-const Version = "2.1.1"
+const Version = internal.Version
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/internal/version.go
new file mode 100644
index 000000000000..936873ec4f8e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/internal/version.go
@@ -0,0 +1,33 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "2.7.1"
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
new file mode 100644
index 000000000000..cc4486eb9e5f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
@@ -0,0 +1,126 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+ arrayOpen = json.Delim('[')
+ arrayClose = json.Delim(']')
+ errBadOpening = errors.New("unexpected opening token, expected '['")
+)
+
+// ProtoJSONStream represents a wrapper for consuming a stream of protobuf
+// messages encoded using protobuf-JSON format. More information on this format
+// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json.
+// The stream must appear as a comma-delimited JSON array of objects with
+// opening and closing square brackets.
+//
+// This is for internal use only.
+type ProtoJSONStream struct {
+ first, closed bool
+ reader io.ReadCloser
+ stream *json.Decoder
+ typ protoreflect.MessageType
+}
+
+// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are
+// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream
+// must be closed when done.
+//
+// This is for internal use only.
+func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream {
+ return &ProtoJSONStream{
+ first: true,
+ reader: rc,
+ stream: json.NewDecoder(rc),
+ typ: typ,
+ }
+}
+
+// Recv decodes the next protobuf message in the stream or returns io.EOF if
+// the stream is done. It is not safe to call Recv on the same stream from
+// different goroutines, just like it is not safe to do so with a single gRPC
+// stream. Type-cast the protobuf message returned to the type provided at
+// ProtoJSONStream creation.
+// Calls to Recv after calling Close will produce io.EOF.
+func (s *ProtoJSONStream) Recv() (proto.Message, error) {
+ if s.closed {
+ return nil, io.EOF
+ }
+ if s.first {
+ s.first = false
+
+ // Consume the opening '[' so Decode gets one object at a time.
+ if t, err := s.stream.Token(); err != nil {
+ return nil, err
+ } else if t != arrayOpen {
+ return nil, errBadOpening
+ }
+ }
+
+ // Capture the next block of data for the item (a JSON object) in the stream.
+ var raw json.RawMessage
+ if err := s.stream.Decode(&raw); err != nil {
+ e := err
+ // To avoid checking the first token of each stream, just attempt to
+ // Decode the next blob and if that fails, double check if it is just
+ // the closing token ']'. If it is the closing, return io.EOF. If it
+ // isn't, return the original error.
+ if t, _ := s.stream.Token(); t == arrayClose {
+ e = io.EOF
+ }
+ return nil, e
+ }
+
+ // Initialize a new instance of the protobuf message to unmarshal the
+ // raw data into.
+ m := s.typ.New().Interface()
+ err := protojson.Unmarshal(raw, m)
+
+ return m, err
+}
+
+// Close closes the stream so that resources are cleaned up.
+func (s *ProtoJSONStream) Close() error {
+ // Dereference the *json.Decoder so that the memory is gc'd.
+ s.stream = nil
+ s.closed = true
+
+ return s.reader.Close()
+}
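A minimal sketch of consuming a ProtoJSONStream (the type is documented as internal use only); the input uses google.protobuf.Struct so the example stays self-contained, and the JSON payload is illustrative.

package main

import (
    "fmt"
    "io"
    "strings"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    // A protobuf-JSON array, as an HTTP server-streaming response body might deliver it.
    body := io.NopCloser(strings.NewReader(`[{"name":"a"},{"name":"b"}]`))

    stream := gax.NewProtoJSONStreamReader(body, (&structpb.Struct{}).ProtoReflect().Type())
    defer stream.Close()

    for {
        msg, err := stream.Recv()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        s := msg.(*structpb.Struct)
        fmt.Println(s.GetFields()["name"].GetStringValue()) // "a", then "b"
    }
}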
diff --git a/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
new file mode 100644
index 000000000000..61ee266a159e
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
@@ -0,0 +1,10 @@
+{
+ "release-type": "go-yoshi",
+ "separate-pull-requests": true,
+ "include-component-in-tag": false,
+ "packages": {
+ "v2": {
+ "component": "v2"
+ }
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
index 138f7c12f0e3..c056bd3058ad 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -1,10 +1,10 @@
+//go:build gofuzz
// +build gofuzz
package httprule
func Fuzz(data []byte) int {
- _, err := Parse(string(data))
- if err != nil {
+ if _, err := Parse(string(data)); err != nil {
return 0
}
return 0
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
index 5edd784e62ad..65ffcf5cf871 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -1,6 +1,7 @@
package httprule
import (
+ "errors"
"fmt"
"strings"
)
@@ -164,9 +165,9 @@ func (p *parser) segment() (segment, error) {
v, err := p.variable()
if err != nil {
- return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
+ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
}
- return v, err
+ return v, nil
}
func (p *parser) literal() (segment, error) {
@@ -191,7 +192,7 @@ func (p *parser) variable() (segment, error) {
if _, err := p.accept("="); err == nil {
segs, err = p.segments()
if err != nil {
- return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
+ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
}
} else {
segs = []segment{wildcard{}}
@@ -213,12 +214,12 @@ func (p *parser) fieldPath() (string, error) {
}
components := []string{c}
for {
- if _, err = p.accept("."); err != nil {
+ if _, err := p.accept("."); err != nil {
return strings.Join(components, "."), nil
}
c, err := p.accept(typeIdent)
if err != nil {
- return "", fmt.Errorf("invalid field path component: %v", err)
+ return "", fmt.Errorf("invalid field path component: %w", err)
}
components = append(components, c)
}
@@ -237,10 +238,8 @@ const (
typeEOF = termType("$")
)
-const (
- // eof is the terminal symbol which always appears at the end of token sequence.
- eof = "\u0000"
-)
+// eof is the terminal symbol which always appears at the end of a token sequence.
+const eof = "\u0000"
// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches to the specified "term".
@@ -275,11 +274,12 @@ func (p *parser) accept(term termType) (string, error) {
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
-// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
-// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
-// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
-// / "*" / "+" / "," / ";" / "="
-// pct-encoded = "%" HEXDIG HEXDIG
+//
+// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
@@ -333,7 +333,7 @@ func expectPChars(t string) error {
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
if ident == "" {
- return fmt.Errorf("empty identifier")
+ return errors.New("empty identifier")
}
for pos, r := range ident {
switch {
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index 95f867a5286c..b5140a3c9d16 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -30,6 +30,7 @@ go_library(
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
+ "@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
@@ -37,6 +38,7 @@ go_library(
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//reflect/protoregistry",
"@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
],
@@ -56,8 +58,10 @@ go_test(
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
+ "mux_internal_test.go",
"mux_test.go",
"pattern_test.go",
+ "query_fuzz_test.go",
"query_test.go",
],
embed = [":runtime"],
@@ -70,7 +74,9 @@ go_test(
"@go_googleapis//google/rpc:errdetails_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+ "@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//health/grpc_health_v1",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
index fb57b9366eab..31553e7848a9 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -13,6 +13,7 @@ import (
"time"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -35,11 +36,15 @@ const metadataHeaderBinarySuffix = "-Bin"
const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"
-var (
- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
- // header isn't present. If the value is 0 the sent `context` will not have a timeout.
- DefaultContextTimeout = 0 * time.Second
-)
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+ "connection": {},
+}
type (
rpcMethodKey struct{}
@@ -95,12 +100,43 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque
return metadata.NewIncomingContext(ctx, md), nil
}
+func isValidGRPCMetadataKey(key string) bool {
+ // Must be a valid gRPC "Header-Name" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means 0-9 a-z _ - .
+ // Only lowercase letters are valid in the wire protocol, but the client library will normalize
+ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ validLowercaseLetter := ch >= 'a' && ch <= 'z'
+ validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+ validDigit := ch >= '0' && ch <= '9'
+ validOther := ch == '.' || ch == '-' || ch == '_'
+ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+ return false
+ }
+ }
+ return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+ // Must be a valid gRPC "ASCII-Value" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means printable ASCII (including spaces); 0x20 to 0x7E inclusive.
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ if ch < 0x20 || ch > 0x7E {
+ return false
+ }
+ }
+ return true
+}
+
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
ctx = withRPCMethod(ctx, rpcMethodName)
for _, o := range options {
ctx = o(ctx)
}
- var pairs []string
timeout := DefaultContextTimeout
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
var err error
@@ -109,7 +145,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
}
}
-
+ var pairs []string
for key, vals := range req.Header {
key = textproto.CanonicalMIMEHeaderKey(key)
for _, val := range vals {
@@ -118,6 +154,10 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
pairs = append(pairs, "authorization", val)
}
if h, ok := mux.incomingHeaderMatcher(key); ok {
+ if !isValidGRPCMetadataKey(h) {
+ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+ continue
+ }
// Handles "-bin" metadata in grpc, since grpc will do another base64
// encode before sending to server, we need to decode it first.
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
@@ -127,6 +167,9 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
}
val = string(b)
+ } else if !isValidGRPCMetadataTextValue(val) {
+ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+ continue
}
pairs = append(pairs, h, val)
}
@@ -172,11 +215,17 @@ type serverMetadataKey struct{}
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
return context.WithValue(ctx, serverMetadataKey{}, md)
}
// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ if ctx == nil {
+ return md, false
+ }
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
return
}
@@ -269,8 +318,8 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
case 'n':
return time.Nanosecond, true
default:
+ return
}
- return
}
// isPermanentHTTPHeader checks whether hdr belongs to the list of
@@ -308,6 +357,13 @@ func isPermanentHTTPHeader(hdr string) bool {
return false
}
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+ return isMalformed
+}
+
// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
index e6bc4e6ceece..d7b15fcfb3f8 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -37,7 +37,7 @@ func BoolSlice(val, sep string) ([]bool, error) {
for i, v := range s {
value, err := Bool(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -57,7 +57,7 @@ func Float64Slice(val, sep string) ([]float64, error) {
for i, v := range s {
value, err := Float64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -81,7 +81,7 @@ func Float32Slice(val, sep string) ([]float32, error) {
for i, v := range s {
value, err := Float32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -101,7 +101,7 @@ func Int64Slice(val, sep string) ([]int64, error) {
for i, v := range s {
value, err := Int64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -125,7 +125,7 @@ func Int32Slice(val, sep string) ([]int32, error) {
for i, v := range s {
value, err := Int32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -145,7 +145,7 @@ func Uint64Slice(val, sep string) ([]uint64, error) {
for i, v := range s {
value, err := Uint64(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -169,7 +169,7 @@ func Uint32Slice(val, sep string) ([]uint32, error) {
for i, v := range s {
value, err := Uint32(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -197,7 +197,7 @@ func BytesSlice(val, sep string) ([][]byte, error) {
for i, v := range s {
value, err := Bytes(v)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
@@ -209,8 +209,7 @@ func Timestamp(val string) (*timestamppb.Timestamp, error) {
var r timestamppb.Timestamp
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
- err := unmarshaler.Unmarshal([]byte(val), &r)
- if err != nil {
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
return nil, err
}
return &r, nil
@@ -221,8 +220,7 @@ func Duration(val string) (*durationpb.Duration, error) {
var r durationpb.Duration
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
- err := unmarshaler.Unmarshal([]byte(val), &r)
- if err != nil {
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
return nil, err
}
return &r, nil
@@ -257,66 +255,64 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
for i, v := range s {
value, err := Enum(v, enumValMap)
if err != nil {
- return values, err
+ return nil, err
}
values[i] = value
}
return values, nil
}
-/*
- Support fot google.protobuf.wrappers on top of primitive types
-*/
+// Support for google.protobuf.wrappers on top of primitive types
// StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrapperspb.StringValue, error) {
- return &wrapperspb.StringValue{Value: val}, nil
+ return wrapperspb.String(val), nil
}
// FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
parsedVal, err := Float32(val)
- return &wrapperspb.FloatValue{Value: parsedVal}, err
+ return wrapperspb.Float(parsedVal), err
}
// DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
parsedVal, err := Float64(val)
- return &wrapperspb.DoubleValue{Value: parsedVal}, err
+ return wrapperspb.Double(parsedVal), err
}
// BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
parsedVal, err := Bool(val)
- return &wrapperspb.BoolValue{Value: parsedVal}, err
+ return wrapperspb.Bool(parsedVal), err
}
// Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
parsedVal, err := Int32(val)
- return &wrapperspb.Int32Value{Value: parsedVal}, err
+ return wrapperspb.Int32(parsedVal), err
}
// UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
parsedVal, err := Uint32(val)
- return &wrapperspb.UInt32Value{Value: parsedVal}, err
+ return wrapperspb.UInt32(parsedVal), err
}
// Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
parsedVal, err := Int64(val)
- return &wrapperspb.Int64Value{Value: parsedVal}, err
+ return wrapperspb.Int64(parsedVal), err
}
// UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
parsedVal, err := Uint64(val)
- return &wrapperspb.UInt64Value{Value: parsedVal}, err
+ return wrapperspb.UInt64(parsedVal), err
}
// BytesValue well-known type support as wrapper around bytes[] type
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
parsedVal, err := Bytes(val)
- return &wrapperspb.BytesValue{Value: parsedVal}, err
+ return wrapperspb.Bytes(parsedVal), err
}
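A small sketch of these wrapper conversion helpers as exposed by the grpc-gateway v2 runtime package; the input strings are illustrative.

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
    // Query-parameter style strings converted into protobuf wrapper types.
    i, err := runtime.Int32Value("42")
    if err != nil {
        panic(err)
    }
    b, _ := runtime.BoolValue("true")
    fmt.Println(i.GetValue(), b.GetValue()) // 42 true
}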
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index d9e0013c4392..d2bcbb7d2a20 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -38,7 +38,7 @@ func HTTPStatusFromCode(code codes.Code) int {
case codes.OK:
return http.StatusOK
case codes.Canceled:
- return http.StatusRequestTimeout
+ return 499
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
@@ -70,10 +70,10 @@ func HTTPStatusFromCode(code codes.Code) int {
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
+ default:
+ grpclog.Infof("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
}
-
- grpclog.Infof("Unknown gRPC error code: %v", code)
- return http.StatusInternalServerError
}
// HTTPError uses the mux-configured error handler.
@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default http error codes mapped on the following error codes:
-// NotFound -> grpc.NotFound
-// StatusBadRequest -> grpc.InvalidArgument
-// MethodNotAllowed -> grpc.Unimplemented
-// Other -> grpc.Internal, method is not expecting to be called for anything else
+//
+// NotFound -> grpc.NotFound
+// StatusBadRequest -> grpc.InvalidArgument
+// MethodNotAllowed -> grpc.Unimplemented
+// Other -> grpc.Internal, method is not expecting to be called for anything else
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
sterr := status.Error(codes.Internal, "Unexpected routing error")
switch httpStatus {
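
For context on the errors.go change: HTTPStatusFromCode now returns the non-standard 499 ("client closed request") for codes.Canceled and folds the unknown-code fallback into a default case. A small sketch of calling the exported mapping (import path written as it appears in this vendor tree):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(runtime.HTTPStatusFromCode(codes.NotFound) == http.StatusNotFound) // true
	// A cancelled RPC now maps to 499 instead of 408 (StatusRequestTimeout).
	fmt.Println(runtime.HTTPStatusFromCode(codes.Canceled)) // 499
}
```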
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index 0138ed2f769f..cd49097fde17 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -2,6 +2,7 @@ package runtime
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"sort"
@@ -44,7 +45,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
- return nil, fmt.Errorf("JSON structure did not match request type")
+ return nil, errors.New("JSON structure did not match request type")
}
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
@@ -53,7 +54,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
}
if isDynamicProtoMessage(fd.Message()) {
- for _, p := range buildPathsBlindly(k, v) {
+ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
newPath := p
if item.path != "" {
newPath = item.path + "." + newPath
@@ -63,7 +64,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
continue
}
- if isProtobufAnyMessage(fd.Message()) {
+ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
_, hasTypeField := v.(map[string]interface{})["@type"]
if hasTypeField {
queue = append(queue, fieldMaskPathItem{path: k})
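
The fieldmask.go hunk builds nested paths from the field's proto name rather than the raw JSON key and skips the Any special case for repeated fields. FieldMaskFromRequestBody is the exported entry point; a hedged sketch below uses a well-known wrapper type as a stand-in for the generated request message a real PATCH handler would pass:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrapperspb.StringValue stands in for a generated request message here;
	// real callers pass the PATCH body reader and their own proto type.
	mask, err := runtime.FieldMaskFromRequestBody(strings.NewReader(`{"value": "hello"}`), &wrapperspb.StringValue{})
	if err != nil {
		panic(err)
	}
	fmt.Println(mask.GetPaths()) // [value]
}
```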
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index d1e21df4810a..945f3a5ebf3a 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
return
}
if err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
@@ -82,15 +82,15 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
if err != nil {
grpclog.Infof("Failed to marshal response chunk: %v", err)
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
- if _, err = w.Write(buf); err != nil {
+ if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to send response chunk: %v", err)
return
}
wroteHeader = true
- if _, err = w.Write(delimiter); err != nil {
+ if _, err := w.Write(delimiter); err != nil {
grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
@@ -200,20 +200,24 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
return nil
}
-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
if !wroteHeader {
w.Header().Set("Content-Type", marshaler.ContentType(msg))
w.WriteHeader(HTTPStatusFromCode(st.Code()))
}
- buf, merr := marshaler.Marshal(msg)
- if merr != nil {
- grpclog.Infof("Failed to marshal an error: %v", merr)
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Infof("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Infof("Failed to notify error to client: %v", err)
return
}
- if _, werr := w.Write(buf); werr != nil {
- grpclog.Infof("Failed to notify error to client: %v", werr)
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
}
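
The handler.go hunk threads the stream delimiter into handleForwardResponseStreamError, so an error emitted mid-stream is terminated the same way as a normal chunk. That helper is internal; what callers can customize is the stream error handler it consults. A hedged sketch (the handler body is illustrative, not from this PR):

```go
package main

import (
	"context"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	mux := runtime.NewServeMux(
		// Every streaming error is converted to a status; the gateway marshals it
		// as an error chunk and, after this change, appends the stream delimiter.
		runtime.WithStreamErrorHandler(func(ctx context.Context, err error) *status.Status {
			return status.New(codes.Internal, "stream aborted")
		}),
	)
	_ = mux
}
```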
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
index 7387c8e39769..51b8247da2a0 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -92,23 +92,20 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Type().Elem().Implements(protoMessageType) {
var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
+ if err := buf.WriteByte('['); err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
+ if err := buf.WriteByte(','); err != nil {
return nil, err
}
}
- if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
return nil, err
}
}
- err = buf.WriteByte(']')
- if err != nil {
+ if err := buf.WriteByte(']'); err != nil {
return nil, err
}
@@ -117,17 +114,16 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Type().Elem().Implements(typeProtoEnum) {
var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
+ if err := buf.WriteByte('['); err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
+ if err := buf.WriteByte(','); err != nil {
return nil, err
}
}
+ var err error
if j.UseEnumNumbers {
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
} else {
@@ -137,8 +133,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
return nil, err
}
}
- err = buf.WriteByte(']')
- if err != nil {
+ if err := buf.WriteByte(']'); err != nil {
return nil, err
}
@@ -219,8 +214,7 @@ func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v int
// Decode into bytes for marshalling
var b json.RawMessage
- err := d.Decode(&b)
- if err != nil {
+ if err := d.Decode(&b); err != nil {
return err
}
@@ -239,8 +233,7 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
if rv.Type().ConvertibleTo(typeProtoMessage) {
// Decode into bytes for marshalling
var b json.RawMessage
- err := d.Decode(&b)
- if err != nil {
+ if err := d.Decode(&b); err != nil {
return err
}
@@ -280,6 +273,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
return nil
}
if rv.Kind() == reflect.Slice {
+ if rv.Type().Elem().Kind() == reflect.Uint8 {
+ var sl []byte
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.SetBytes(sl)
+ }
+ return nil
+ }
+
var sl []json.RawMessage
if err := d.Decode(&sl); err != nil {
return err
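
The new reflect.Uint8 branch lets decodeNonProtoField decode a base64 JSON string directly into a []byte destination instead of element by element. A minimal sketch, assuming the default JSONPb marshaler options:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	var jpb runtime.JSONPb
	var raw []byte
	// "aGVsbG8=" is base64 for "hello"; non-proto []byte targets are now
	// decoded in one step via encoding/json's byte-slice handling.
	if err := jpb.Unmarshal([]byte(`"aGVsbG8="`), &raw); err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // hello
}
```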
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
index 007f8f1a2c7f..398c780dc226 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -1,10 +1,8 @@
package runtime
import (
- "io"
-
"errors"
- "io/ioutil"
+ "io"
"google.golang.org/protobuf/proto"
)
@@ -38,7 +36,7 @@ func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
// NewDecoder returns a Decoder which reads proto stream from "reader".
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
return DecoderFunc(func(value interface{}) error {
- buffer, err := ioutil.ReadAll(reader)
+ buffer, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -53,8 +51,7 @@ func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
if err != nil {
return err
}
- _, err = writer.Write(buffer)
- if err != nil {
+ if _, err := writer.Write(buffer); err != nil {
return err
}
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 46a4aabaf955..139bbbad49ca 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -6,10 +6,13 @@ import (
"fmt"
"net/http"
"net/textproto"
+ "regexp"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
@@ -23,15 +26,15 @@ const (
// path string before doing any routing.
UnescapingModeLegacy UnescapingMode = iota
- // EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
// reserved characters.
UnescapingModeAllExceptReserved
- // EscapingTypeExceptSlash unescapes URL path parameters except path
- // seperators, which will be left as "%2F".
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
UnescapingModeAllExceptSlash
- // URL path parameters will be fully decoded.
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
UnescapingModeAllCharacters
// UnescapingModeDefault is the default escaping type.
@@ -40,6 +43,8 @@ const (
UnescapingModeDefault = UnescapingModeLegacy
)
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
@@ -75,7 +80,7 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http.
}
}
-// WithEscapingType sets the escaping type. See the definitions of UnescapingMode
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
// for more information.
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
return func(serveMux *ServeMux) {
@@ -96,13 +101,14 @@ func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMux
type HeaderMatcherFunc func(string) (string, bool)
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
+// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
func DefaultHeaderMatcher(key string) (string, bool) {
- key = textproto.CanonicalMIMEHeaderKey(key)
- if isPermanentHTTPHeader(key) {
+ switch key = textproto.CanonicalMIMEHeaderKey(key); {
+ case isPermanentHTTPHeader(key):
return MetadataPrefix + key, true
- } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+ case strings.HasPrefix(key, MetadataHeaderPrefix):
return key[len(MetadataHeaderPrefix):], true
}
return "", false
@@ -113,11 +119,30 @@ func DefaultHeaderMatcher(key string) (string, bool) {
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ for _, header := range fn.matchedMalformedHeaders() {
+ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+ }
+
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
@@ -179,6 +204,56 @@ func WithDisablePathLengthFallback() ServeMuxOption {
}
}
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to setup the protocol in the grpc server.
+//
+// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+ return func(s *ServeMux) {
+ // error can be ignored since pattern is definitely valid
+ _ = s.HandlePath(
+ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+ ) {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+
+ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+ Service: r.URL.Query().Get("service"),
+ })
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+
+ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+ switch resp.GetStatus() {
+ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+ err = status.Error(codes.Unavailable, resp.String())
+ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+ err = status.Error(codes.NotFound, resp.String())
+ }
+
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ _ = outboundMarshaler.NewEncoder(w).Encode(resp)
+ })
+ }
+}
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+ return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
+
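
WithHealthzEndpoint / WithHealthEndpointAt are new ServeMux options that proxy an HTTP health path to the gRPC Health Checking Protocol. A hedged wiring sketch; the addresses are placeholders, not values from this PR, and the backend is assumed to serve grpc.health.v1.Health:

```go
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Placeholder upstream address for the gRPC backend.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	mux := runtime.NewServeMux(
		// GET /healthz?service=my.Service is forwarded as a HealthCheckRequest.
		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
	)
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```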
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
@@ -229,7 +304,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er
return nil
}
-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -245,8 +320,6 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path = r.URL.RawPath
}
- components := strings.Split(path[1:], "/")
-
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
@@ -257,8 +330,18 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- // Verb out here is to memoize for the fallback case below
- var verb string
+ var pathComponents []string
+ // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
+ // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
+ // path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
+ // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
+ if s.unescapingMode == UnescapingModeAllCharacters {
+ pathComponents = encodedPathSplitter.Split(path[1:], -1)
+ } else {
+ pathComponents = strings.Split(path[1:], "/")
+ }
+
+ lastPathComponent := pathComponents[len(pathComponents)-1]
for _, h := range s.handlers[r.Method] {
// If the pattern has a verb, explicitly look for a suffix in the last
@@ -269,23 +352,28 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// parser because we know what verb we're looking for, however, there
// are still some cases that the parser itself cannot disambiguate. See
// the comment there if interested.
+
+ var verb string
patVerb := h.pat.Verb()
- l := len(components)
- lastComponent := components[l-1]
- var idx int = -1
- if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
- idx = len(lastComponent) - len(patVerb) - 1
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
}
if idx == 0 {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
return
}
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
if idx > 0 {
- components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
}
- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
@@ -308,7 +396,22 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
continue
}
for _, h := range handlers {
- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
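
The ServeHTTP rework computes the verb suffix per handler and, in UnescapingModeAllCharacters, splits the raw path on both "/" and "%2F" so escaped slashes survive into path parameters. The mode is opt-in; a short sketch of selecting it:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// With this mode a parameter such as "a%2Fb" reaches the handler as "a/b"
	// instead of being treated as two separate path components.
	mux := runtime.NewServeMux(
		runtime.WithUnescapingMode(runtime.UnescapingModeAllCharacters),
	)
	_ = mux
}
```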
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
index df7cb81426a0..8f90d15a5620 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -15,8 +15,6 @@ var (
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
- // ErrMalformedSequence indicates that an escape sequence was malformed.
- ErrMalformedSequence = errors.New("malformed escape sequence")
)
type MalformedSequenceError string
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
index fb0c84ef0cdd..31ce33a76214 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -1,7 +1,6 @@
package runtime
import (
- "encoding/base64"
"errors"
"fmt"
"net/url"
@@ -13,17 +12,19 @@ import (
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
-var currentQueryParser QueryParameterParser = &defaultQueryParser{}
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
@@ -36,14 +37,17 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili
return currentQueryParser.Parse(msg, values, filter)
}
-type defaultQueryParser struct{}
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
-func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
- match := valuesKeyRegexp.FindStringSubmatch(key)
- if len(match) == 3 {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
@@ -175,10 +179,10 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
return protoreflect.ValueOfBool(v), nil
case protoreflect.EnumKind:
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
- switch {
- case errors.Is(err, protoregistry.NotFound):
- return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
- case err != nil:
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+ }
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
}
// Look for enum by name
@@ -189,8 +193,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
// Look for enum by number
- v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
- if v == nil {
+ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
}
@@ -234,7 +237,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
case protoreflect.StringKind:
return protoreflect.ValueOfString(value), nil
case protoreflect.BytesKind:
- v, err := base64.URLEncoding.DecodeString(value)
+ v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
@@ -250,18 +253,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
var msg proto.Message
switch msgDescriptor.FullName() {
case "google.protobuf.Timestamp":
- if value == "null" {
- break
- }
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
case "google.protobuf.Duration":
- if value == "null" {
- break
- }
d, err := time.ParseDuration(value)
if err != nil {
return protoreflect.Value{}, err
@@ -272,55 +269,67 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.DoubleValue{Value: v}
+ msg = wrapperspb.Double(v)
case "google.protobuf.FloatValue":
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.FloatValue{Value: float32(v)}
+ msg = wrapperspb.Float(float32(v))
case "google.protobuf.Int64Value":
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.Int64Value{Value: v}
+ msg = wrapperspb.Int64(v)
case "google.protobuf.Int32Value":
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.Int32Value{Value: int32(v)}
+ msg = wrapperspb.Int32(int32(v))
case "google.protobuf.UInt64Value":
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.UInt64Value{Value: v}
+ msg = wrapperspb.UInt64(v)
case "google.protobuf.UInt32Value":
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.UInt32Value{Value: uint32(v)}
+ msg = wrapperspb.UInt32(uint32(v))
case "google.protobuf.BoolValue":
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.BoolValue{Value: v}
+ msg = wrapperspb.Bool(v)
case "google.protobuf.StringValue":
- msg = &wrapperspb.StringValue{Value: value}
+ msg = wrapperspb.String(value)
case "google.protobuf.BytesValue":
- v, err := base64.URLEncoding.DecodeString(value)
+ v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
- msg = &wrapperspb.BytesValue{Value: v}
+ msg = wrapperspb.Bytes(v)
case "google.protobuf.FieldMask":
fm := &field_mask.FieldMask{}
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
msg = fm
+ case "google.protobuf.Value":
+ var v structpb.Value
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ case "google.protobuf.Struct":
+ var v structpb.Struct
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
default:
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
}
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
index 5d8d12bc4211..b89409465773 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
"doc.go",
"pattern.go",
"readerfactory.go",
+ "string_array_flag.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
@@ -16,7 +17,10 @@ go_library(
go_test(
name = "utilities_test",
size = "small",
- srcs = ["trie_test.go"],
+ srcs = [
+ "string_array_flag_test.go",
+ "trie_test.go",
+ ],
deps = [":utilities"],
)
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
index 6dd3854665f1..01d26edae3c9 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -3,13 +3,12 @@ package utilities
import (
"bytes"
"io"
- "io/ioutil"
)
// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 000000000000..d224ab776c0c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is a cut-down interface to `flag`
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
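
The new StringArrayFlags type collects repeated flag values. A hedged usage sketch; the flag name is invented for illustration:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Each occurrence of -allowed-header appends to the slice,
	// e.g. -allowed-header=X-A -allowed-header=X-B.
	headers := utilities.StringArrayFlag(flag.CommandLine, "allowed-header", "header that may be repeated")
	flag.Parse()
	fmt.Println(headers.String()) // comma-joined values, e.g. "X-A,X-B"
}
```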
diff --git a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
index af3b703d5057..dd99b0ed2562 100644
--- a/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
+++ b/cluster-autoscaler/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -40,7 +40,7 @@ func NewDoubleArray(seqs [][]string) *DoubleArray {
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
var result [][]int
for _, seq := range seqs {
- var encoded []int
+ encoded := make([]int, 0, len(seq))
for _, token := range seq {
if _, ok := da.Encoding[token]; !ok {
da.Encoding[token] = len(da.Encoding)
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/.deepsource.toml b/cluster-autoscaler/vendor/github.com/imdario/mergo/.deepsource.toml
new file mode 100644
index 000000000000..8a0681af8559
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/.travis.yml b/cluster-autoscaler/vendor/github.com/imdario/mergo/.travis.yml
index b13a50ed1fb8..d324c43ba4df 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/.travis.yml
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/.travis.yml
@@ -1,7 +1,12 @@
language: go
+arch:
+ - amd64
+ - ppc64le
install:
- go get -t
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
+ - go test -race -v ./...
+after_script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/cluster-autoscaler/vendor/github.com/imdario/mergo/CONTRIBUTING.md
new file mode 100644
index 000000000000..0a1ff9f94d85
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report for your bug or error already exists in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/README.md b/cluster-autoscaler/vendor/github.com/imdario/mergo/README.md
index 8b76f1fbf33a..4f0287498574 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/README.md
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/README.md
@@ -1,48 +1,59 @@
# Mergo
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
-
[![GoDoc][3]][4]
-[![GoCard][5]][6]
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
[![Build Status][1]][2]
-[![Coverage Status][7]][8]
-[![Sourcegraph][9]][10]
+[![Coverage Status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA Status][13]][14]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://goreportcard.com/badge/imdario/mergo
-[6]: https://goreportcard.com/report/github.com/imdario/mergo
-[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[8]: https://coveralls.io/github/imdario/mergo?branch=master
-[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.aaakk.us.kg-imdario-mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-### Latest release
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6).
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
-Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
-If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations
-If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
-[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
+
### Mergo in the wild
@@ -86,8 +97,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
-## Installation
+## Install
go get github.com/imdario/mergo
@@ -98,7 +112,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
## Usage
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go
if err := mergo.Merge(&dst, src); err != nil {
@@ -124,9 +138,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
-More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
-
-### Nice example
+Here is a nice example:
```go
package main
@@ -158,7 +170,7 @@ func main() {
Note: if test are failing due missing package, please execute:
- go get gopkg.in/yaml.v2
+ go get gopkg.in/yaml.v3
### Transformers
@@ -174,10 +186,10 @@ import (
"time"
)
-type timeTransfomer struct {
+type timeTransformer struct {
}
-func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
@@ -201,14 +213,13 @@ type Snapshot struct {
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
```
-
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -220,3 +231,6 @@ Written by [Dario Castañé](http://dario.im).
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/SECURITY.md b/cluster-autoscaler/vendor/github.com/imdario/mergo/SECURITY.md
new file mode 100644
index 000000000000..a5de61f77ba7
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/doc.go b/cluster-autoscaler/vendor/github.com/imdario/mergo/doc.go
index 6e9aa7baf354..fcd985f995dc 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/doc.go
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/doc.go
@@ -4,41 +4,140 @@
// license that can be found in the LICENSE file.
/*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for Go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
Usage
-From my own work-in-progress project:
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ )
- type networkConfig struct {
- Protocol string
- Address string
- ServerType string `json: "server_type"`
- Port uint16
+ type Foo struct {
+ A string
+ B int64
}
- type FssnConfig struct {
- Network networkConfig
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
}
- var fssnDefault = FssnConfig {
- networkConfig {
- "tcp",
- "127.0.0.1",
- "http",
- 31560,
- },
+Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
}
- // Inside a function [...]
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
- if err := mergo.Merge(&config, fssnDefault); err != nil {
- log.Fatal(err)
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
- // More code [...]
+Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, as Go language.
*/
package mergo
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/map.go b/cluster-autoscaler/vendor/github.com/imdario/mergo/map.go
index 6ea38e636b64..b50d5c2a4e7c 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/map.go
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/map.go
@@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
}
// Remember, remember...
- visited[h] = &visit{addr, typ, seen}
+ visited[h] = &visit{typ, seen, addr}
}
zeroValue := reflect.Value{}
switch dst.Kind() {
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
dstMap[fieldName] = src.Field(i).Interface()
}
}
@@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
case reflect.Struct:
srcMap := src.Interface().(map[string]interface{})
for key := range srcMap {
+ config.overwriteWithEmptyValue = true
srcValue := srcMap[key]
fieldName := changeInitialCase(key, unicode.ToUpper)
dstElement := dst.FieldByName(fieldName)
@@ -140,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
}
func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
var (
vDst, vSrc reflect.Value
err error
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/merge.go b/cluster-autoscaler/vendor/github.com/imdario/mergo/merge.go
index 44f70a89d919..0ef9b2138c15 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/merge.go
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/merge.go
@@ -13,22 +13,40 @@ import (
"reflect"
)
-func hasExportedField(dst reflect.Value) (exported bool) {
+func hasMergeableFields(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i)
if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
- exported = exported || hasExportedField(dst.Field(i))
- } else {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
exported = exported || len(field.PkgPath) == 0
}
}
return
}
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
type Config struct {
- Overwrite bool
- AppendSlice bool
- Transformers Transformers
+ Transformers Transformers
+ Overwrite bool
+ ShouldNotDereference bool
+ AppendSlice bool
+ TypeCheck bool
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
}
type Transformers interface {
@@ -40,6 +58,10 @@ type Transformers interface {
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
if !src.IsValid() {
return
@@ -55,10 +77,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
// Remember, remember...
- visited[h] = &visit{addr, typ, seen}
+ visited[h] = &visit{typ, seen, addr}
}
- if config.Transformers != nil && !isEmptyValue(dst) {
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
@@ -67,21 +89,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
switch dst.Kind() {
case reflect.Struct:
- if hasExportedField(dst) {
+ if hasMergeableFields(dst) {
for i, n := 0, dst.NumField(); i < n; i++ {
if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
return
}
}
} else {
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
dst.Set(src)
}
}
case reflect.Map:
if dst.IsNil() && !src.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
+ if dst.CanSet() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ } else {
+ dst = src
+ return
+ }
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite && dst.CanSet() {
+ dst.Set(src)
+ }
+ return
}
+
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
@@ -91,6 +126,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
switch srcElement.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
continue
}
fallthrough
@@ -125,54 +163,116 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
}
dst.SetMapIndex(key, dstSlice)
}
}
- if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
- continue
+
+ if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+ continue
+ }
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
}
- if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
}
+
+ // Ensure that all keys in dst are deleted if they are not in src.
+ if overwriteWithEmptySrc {
+ for _, key := range dst.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ dst.SetMapIndex(key, reflect.Value{})
+ }
+ }
+ }
case reflect.Slice:
if !dst.CanSet() {
break
}
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
}
dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
}
case reflect.Ptr:
fallthrough
case reflect.Interface:
- if src.IsNil() {
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
break
}
+
if src.Kind() != reflect.Interface {
- if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
+ if !config.ShouldNotDereference {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+ dst.Set(src)
+ }
}
} else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
@@ -183,18 +283,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
break
}
+
if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
- } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
}
default:
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
+ mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
}
}
+
return
}
@@ -206,7 +319,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, opts...)
}
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
@@ -225,12 +338,43 @@ func WithOverride(config *Config) {
config.Overwrite = true
}
-// WithAppendSlice will make merge append slices instead of overwriting it
+// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+ config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) {
config.AppendSlice = true
}
+// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
var (
vDst, vSrc reflect.Value
err error
@@ -250,3 +394,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
}
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
+
+// IsReflectNil is the reflect value provided nil
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
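To show the option surface added above in use, a minimal, hedged sketch follows; the Profile type and its fields are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Profile struct {
	Tags  []string
	Owner *string
}

func main() {
	owner := "alice"
	dst := Profile{Tags: []string{"a"}}
	src := Profile{Tags: []string{"b", "c"}, Owner: &owner}

	// WithAppendSlice appends src's slice elements to dst's slice instead of
	// replacing it.
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Tags) // [a b c]

	// A non-pointer destination is now rejected up front with
	// ErrNonPointerArgument ("dst must be a pointer").
	if err := mergo.Merge(dst, src); err != nil {
		fmt.Println(err)
	}
}
```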
diff --git a/cluster-autoscaler/vendor/github.com/imdario/mergo/mergo.go b/cluster-autoscaler/vendor/github.com/imdario/mergo/mergo.go
index a82fea2fdccc..0a721e2d8586 100644
--- a/cluster-autoscaler/vendor/github.com/imdario/mergo/mergo.go
+++ b/cluster-autoscaler/vendor/github.com/imdario/mergo/mergo.go
@@ -17,9 +17,10 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+ ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@@ -27,13 +28,13 @@ var (
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
- ptr uintptr
typ reflect.Type
next *visit
+ ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
@@ -49,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() {
return true
}
- return isEmptyValue(v.Elem())
+ if shouldDereference {
+ return isEmptyValue(v.Elem(), shouldDereference)
+ }
+ return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
@@ -64,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
@@ -75,23 +79,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
}
return
}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- return // TODO refactor
-}
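The isEmptyValue signature change above is what backs WithoutDereference: with dereferencing disabled, a non-nil pointer is never treated as empty, so an explicitly set zero value behind a pointer survives the merge. A small sketch, with an illustrative Config type:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Retries *int
}

func intPtr(i int) *int { return &i }

func main() {
	src := Config{Retries: intPtr(3)} // defaults to merge in

	// Default behavior dereferences pointers, sees the pointed-to 0 as empty,
	// and fills it in from src.
	d1 := Config{Retries: intPtr(0)}
	if err := mergo.Merge(&d1, src); err != nil {
		panic(err)
	}
	fmt.Println(*d1.Retries) // 3

	// With WithoutDereference the non-nil dst pointer is not considered empty,
	// so the explicit 0 is preserved.
	d2 := Config{Retries: intPtr(0)}
	if err := mergo.Merge(&d2, src, mergo.WithoutDereference); err != nil {
		panic(err)
	}
	fmt.Println(*d2.Retries) // 0
}
```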
diff --git a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_others.go
index 9d2d8a4bab92..06a91f0868b8 100644
--- a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_others.go
+++ b/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package mousetrap
diff --git a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
index 336142a5e3ec..0c5688021649 100644
--- a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
+++ b/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -1,81 +1,32 @@
-// +build windows
-// +build !go1.4
-
package mousetrap
import (
- "fmt"
- "os"
"syscall"
"unsafe"
)
-const (
- // defined by the Win32 API
- th32cs_snapprocess uintptr = 0x2
-)
-
-var (
- kernel = syscall.MustLoadDLL("kernel32.dll")
- CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
- Process32First = kernel.MustFindProc("Process32FirstW")
- Process32Next = kernel.MustFindProc("Process32NextW")
-)
-
-// ProcessEntry32 structure defined by the Win32 API
-type processEntry32 struct {
- dwSize uint32
- cntUsage uint32
- th32ProcessID uint32
- th32DefaultHeapID int
- th32ModuleID uint32
- cntThreads uint32
- th32ParentProcessID uint32
- pcPriClassBase int32
- dwFlags uint32
- szExeFile [syscall.MAX_PATH]uint16
-}
-
-func getProcessEntry(pid int) (pe *processEntry32, err error) {
- snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
- if snapshot == uintptr(syscall.InvalidHandle) {
- err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
- return
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
}
- defer syscall.CloseHandle(syscall.Handle(snapshot))
-
- var processEntry processEntry32
- processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
- ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
- if ok == 0 {
- err = fmt.Errorf("Process32First: %v", e1)
- return
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
}
-
for {
- if processEntry.th32ProcessID == uint32(pid) {
- pe = &processEntry
- return
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
}
-
- ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
- if ok == 0 {
- err = fmt.Errorf("Process32Next: %v", e1)
- return
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
}
}
}
-func getppid() (pid int, err error) {
- pe, err := getProcessEntry(os.Getpid())
- if err != nil {
- return
- }
-
- pid = int(pe.th32ParentProcessID)
- return
-}
-
// StartedByExplorer returns true if the program was invoked by the user double-clicking
// on the executable from explorer.exe
//
@@ -83,16 +34,9 @@ func getppid() (pid int, err error) {
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
func StartedByExplorer() bool {
- ppid, err := getppid()
+ pe, err := getProcessEntry(syscall.Getppid())
if err != nil {
return false
}
-
- pe, err := getProcessEntry(ppid)
- if err != nil {
- return false
- }
-
- name := syscall.UTF16ToString(pe.szExeFile[:])
- return name == "explorer.exe"
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
}
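For context, the package's exported entry point is StartedByExplorer; a typical (hypothetical) guard at the top of a CLI main looks like the sketch below — the message and delay are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// If the binary was double-clicked from Explorer rather than run from a
	// terminal, keep the console window open long enough to read the message.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}

	// ... normal CLI entry point ...
}
```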
diff --git a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
deleted file mode 100644
index 9a28e57c3c30..000000000000
--- a/cluster-autoscaler/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build windows
-// +build go1.4
-
-package mousetrap
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
- snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
- if err != nil {
- return nil, err
- }
- defer syscall.CloseHandle(snapshot)
- var procEntry syscall.ProcessEntry32
- procEntry.Size = uint32(unsafe.Sizeof(procEntry))
- if err = syscall.Process32First(snapshot, &procEntry); err != nil {
- return nil, err
- }
- for {
- if procEntry.ProcessID == uint32(pid) {
- return &procEntry, nil
- }
- err = syscall.Process32Next(snapshot, &procEntry)
- if err != nil {
- return nil, err
- }
- }
-}
-
-// StartedByExplorer returns true if the program was invoked by the user double-clicking
-// on the executable from explorer.exe
-//
-// It is conservative and returns false if any of the internal calls fail.
-// It does not guarantee that the program was run from a terminal. It only can tell you
-// whether it was launched from explorer.exe
-func StartedByExplorer() bool {
- pe, err := getProcessEntry(os.Getppid())
- if err != nil {
- return false
- }
- return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
-}
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
new file mode 100644
index 000000000000..c758234904ec
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -0,0 +1,96 @@
+## 1.5.0
+
+* New option `IgnoreUntaggedFields` to ignore decoding to any fields
+ without `mapstructure` (or the configured tag name) set [GH-277]
+* New option `ErrorUnset` which makes it an error if any fields
+ in a target struct are not set by the decoding process. [GH-225]
+* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
+* Decoding to slice from array no longer crashes [GH-265]
+* Decode nested struct pointers to map [GH-271]
+* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
+* Fix issue where fields with `,omitempty` would sometimes decode
+ into a map with an empty string key [GH-281]
+
+## 1.4.3
+
+* Fix cases where `json.Number` didn't decode properly [GH-261]
+
+## 1.4.2
+
+* Custom name matchers to support any sort of casing, formatting, etc. for
+ field names. [GH-250]
+* Fix possible panic in ComposeDecodeHookFunc [GH-251]
+
+## 1.4.1
+
+* Fix regression where `*time.Time` value would be set to empty and not be sent
+ to decode hooks properly [GH-232]
+
+## 1.4.0
+
+* A new decode hook type `DecodeHookFuncValue` has been added that has
+ access to the full values. [GH-183]
+* Squash is now supported with embedded fields that are struct pointers [GH-205]
+* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
+
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+ structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+ or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+ in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur in when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
+## 1.1.2
+
+* Fix error when decode hook decodes interface implementation into interface
+ type. [GH-140]
+
+## 1.1.1
+
+* Fix panic that can happen in `decodePtr`
+
+## 1.1.0
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+ allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 000000000000..f9c841a51e0d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 000000000000..0018dc7d9f94
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
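A minimal sketch of that last step — decoding an already-parsed map[string]interface{} into a concrete struct; the Person type is illustrative:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Type string
	Name string
}

func main() {
	// The loosely-typed value you end up with after unmarshalling unknown JSON.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
}
```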
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 000000000000..3a754ca72484
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,279 @@
+package mapstructure
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+ var f3 DecodeHookFuncValue
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2, f3}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Value, to reflect.Value) (interface{}, error) {
+
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from.Type(), to.Type(), from.Interface())
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), from.Interface())
+ case DecodeHookFuncValue:
+ return f(from, to)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ var err error
+ data := f.Interface()
+
+ newFrom := f
+ for _, f1 := range fs {
+ data, err = DecodeHookExec(f1, newFrom, t)
+ if err != nil {
+ return nil, err
+ }
+ newFrom = reflect.ValueOf(data)
+ }
+
+ return data, nil
+ }
+}
+
+// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
+// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
+ return func(a, b reflect.Value) (interface{}, error) {
+ var allErrs string
+ var out interface{}
+ var err error
+
+ for _, f := range ff {
+ out, err = DecodeHookExec(f, a, b)
+ if err != nil {
+ allErrs += err.Error() + "\n"
+ continue
+ }
+
+ return out, nil
+ }
+
+ return nil, errors.New(allErrs)
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ if f != reflect.String || t != reflect.Slice {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IP{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ ip := net.ParseIP(data.(string))
+ if ip == nil {
+ return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+ }
+
+ return ip, nil
+ }
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(net.IPNet{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ _, net, err := net.ParseCIDR(data.(string))
+ return net, err
+ }
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Time{}) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.Parse(layout, data.(string))
+ }
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ }
+ return "0", nil
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
+
+func RecursiveStructToMapHookFunc() DecodeHookFunc {
+ return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+ if f.Kind() != reflect.Struct {
+ return f.Interface(), nil
+ }
+
+ var i interface{} = struct{}{}
+ if t.Type() != reflect.TypeOf(&i).Elem() {
+ return f.Interface(), nil
+ }
+
+ m := make(map[string]interface{})
+ t.Set(reflect.ValueOf(m))
+
+ return f.Interface(), nil
+ }
+}
+
+// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+// strings to the UnmarshalText function, when the target type
+// implements the encoding.TextUnmarshaler interface
+func TextUnmarshallerHookFunc() DecodeHookFuncType {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ result := reflect.New(t).Interface()
+ unmarshaller, ok := result.(encoding.TextUnmarshaler)
+ if !ok {
+ return data, nil
+ }
+ if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+}
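These hook helpers are normally wired together through DecoderConfig.DecodeHook. A minimal sketch, assuming an illustrative Config type and input keys:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Hosts   []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"hosts":   "a.example.com,b.example.com",
	}

	var cfg Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		// Hooks run in order; each one only fires when the source and target
		// types match, so the composition is safe for unrelated fields.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout, cfg.Hosts) // 1m30s [a.example.com b.example.com]
}
```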
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000000..47a99e5af3f1
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+ Errors []string
+}
+
+func (e *Error) Error() string {
+ points := make([]string, len(e.Errors))
+ for i, err := range e.Errors {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ sort.Strings(points)
+ return fmt.Sprintf(
+ "%d error(s) decoding:\n\n%s",
+ len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+ if e == nil {
+ return nil
+ }
+
+ result := make([]error, len(e.Errors))
+ for i, e := range e.Errors {
+ result[i] = errors.New(e)
+ }
+
+ return result
+}
+
+func appendErrors(errors []string, err error) []string {
+ switch e := err.(type) {
+ case *Error:
+ return append(errors, e.Errors...)
+ default:
+ return append(errors, e.Error())
+ }
+}
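When several fields fail to decode, the returned error is this aggregated *Error; a hedged sketch of unpacking it (the input and Target type are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Target struct {
	Port    int
	Enabled bool
}

func main() {
	// Both fields carry the wrong type, so decoding reports two errors.
	input := map[string]interface{}{
		"port":    "not-a-number",
		"enabled": []string{"nope"},
	}

	var t Target
	err := mapstructure.Decode(input, &t)

	var merr *mapstructure.Error
	if errors.As(err, &merr) {
		for _, e := range merr.WrappedErrors() {
			fmt.Println("decode issue:", e)
		}
	}
}
```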
diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000000..1efb22ac3610
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1540 @@
+// Package mapstructure exposes functionality to convert one arbitrary
+// Go type into another, typically to convert a map[string]interface{}
+// into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+//
+// The simplest function to start with is Decode.
+//
+// Field Tags
+//
+// When decoding to a struct, mapstructure will use the field name by
+// default to perform the mapping. For example, if a struct has a field
+// "Username" then mapstructure will look for a key in the source value
+// of "username" (case insensitive).
+//
+// type User struct {
+// Username string
+// }
+//
+// You can change the behavior of mapstructure by using struct tags.
+// The default struct tag that mapstructure looks for is "mapstructure"
+// but you can customize it using DecoderConfig.
+//
+// Renaming Fields
+//
+// To rename the key that mapstructure looks for, use the "mapstructure"
+// tag and set a value directly. For example, to change the "username" example
+// above to "user":
+//
+// type User struct {
+// Username string `mapstructure:"user"`
+// }
+//
+// Embedded Structs and Squashing
+//
+// Embedded structs are treated as if they're another field with that name.
+// By default, the two structs below are equivalent when decoding with
+// mapstructure:
+//
+// type Person struct {
+// Name string
+// }
+//
+// type Friend struct {
+// Person
+// }
+//
+// type Friend struct {
+// Person Person
+// }
+//
+// This would require an input that looks like below:
+//
+// map[string]interface{}{
+// "person": map[string]interface{}{"name": "alice"},
+// }
+//
+// If your "person" value is NOT nested, then you can append ",squash" to
+// your tag value and mapstructure will treat it as if the embedded struct
+// were part of the struct directly. Example:
+//
+// type Friend struct {
+// Person `mapstructure:",squash"`
+// }
+//
+// Now the following input would be accepted:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// When decoding from a struct to a map, the squash tag squashes the struct
+// fields into a single map. Using the example structs from above:
+//
+// Friend{Person: Person{Name: "alice"}}
+//
+// Will be decoded into a map:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// DecoderConfig has a field that changes the behavior of mapstructure
+// to always squash embedded structs.
+//
+// Remainder Values
+//
+// If there are any unmapped keys in the source value, mapstructure by
+// default will silently ignore them. You can error by setting ErrorUnused
+// in DecoderConfig. If you're using Metadata you can also maintain a slice
+// of the unused keys.
+//
+// You can also use the ",remain" suffix on your tag to collect all unused
+// values in a map. The field with this tag MUST be a map type and should
+// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
+// See example below:
+//
+// type Friend struct {
+// Name string
+// Other map[string]interface{} `mapstructure:",remain"`
+// }
+//
+// Given the input below, Other would be populated with the other
+// values that weren't used (everything but "name"):
+//
+// map[string]interface{}{
+// "name": "bob",
+// "address": "123 Maple St.",
+// }
+//
+// Omit Empty Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitempty" suffix on your tag to omit that value if it equates to
+// the zero value. The zero value of all types is specified in the Go
+// specification.
+//
+// For example, the zero type of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type.
+//
+// type Source struct {
+// Age int `mapstructure:",omitempty"`
+// }
+//
+// Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+// type Exported struct {
+// private string // this unexported field will be skipped
+// Public string
+// }
+//
+// Using this map as input:
+//
+// map[string]interface{}{
+// "private": "I will be ignored",
+// "Public": "I made it through!",
+// }
+//
+// The following struct will be decoded:
+//
+// type Exported struct {
+// private: "" // field is left with an empty string (zero value)
+// Public: "I made it through!"
+// }
+//
+// Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
+package mapstructure
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+// DecodeHookFuncValue.
+// Values are a superset of Types (Values can return types), and Types are a
+// superset of Kinds (Types can return Kinds) and are generally a richer thing
+// to use, but Kinds are simpler if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+// values.
+type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+ // DecodeHook, if set, will be called before any decoding and any
+ // type conversion (if WeaklyTypedInput is on). This lets you modify
+ // the values before they're set down onto the resulting struct. The
+ // DecodeHook is called for every map and value in the input. This means
+ // that if a struct has embedded fields with squash tags the decode hook
+ // is called only once with all of the input data, not once for each
+ // embedded struct.
+ //
+ // If an error is returned, the entire decode will fail with that error.
+ DecodeHook DecodeHookFunc
+
+ // If ErrorUnused is true, then it is an error for there to exist
+ // keys in the original map that were unused in the decoding process
+ // (extra keys).
+ ErrorUnused bool
+
+ // If ErrorUnset is true, then it is an error for there to exist
+ // fields in the result that were not set in the decoding process
+ // (extra fields). This only applies to decoding to a struct. This
+ // will affect all nested structs as well.
+ ErrorUnset bool
+
+ // ZeroFields, if set to true, will zero fields before writing them.
+ // For example, a map will be emptied before decoded values are put in
+ // it. If this is false, a map will be merged.
+ ZeroFields bool
+
+ // If WeaklyTypedInput is true, the decoder will make the following
+ // "weak" conversions:
+ //
+ // - bools to string (true = "1", false = "0")
+ // - numbers to string (base 10)
+ // - bools to int/uint (true = 1, false = 0)
+ // - strings to int/uint (base implied by prefix)
+ // - int to bool (true if value != 0)
+ // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+ // FALSE, false, False. Anything else is an error)
+ // - empty array = empty map and vice versa
+ // - negative numbers to overflowed uint values (base 10)
+ // - slice of maps to a merged map
+ // - single values are converted to slices if required. Each
+ // element is weakly decoded. For example: "4" can become []int{4}
+ // if the target type is an int slice.
+ //
+ WeaklyTypedInput bool
+
+ // Squash will squash embedded structs. A squash tag may also be
+ // added to an individual struct field using a tag. For example:
+ //
+ // type Parent struct {
+ // Child `mapstructure:",squash"`
+ // }
+ Squash bool
+
+ // Metadata is the struct that will contain extra metadata about
+ // the decoding. If this is nil, then no metadata will be tracked.
+ Metadata *Metadata
+
+ // Result is a pointer to the struct that will contain the decoded
+ // value.
+ Result interface{}
+
+ // The tag name that mapstructure reads for field names. This
+ // defaults to "mapstructure"
+ TagName string
+
+ // IgnoreUntaggedFields ignores all struct fields without explicit
+ // TagName, comparable to `mapstructure:"-"` as default behaviour.
+ IgnoreUntaggedFields bool
+
+ // MatchName is the function used to match the map key to the struct
+ // field name or tag. Defaults to `strings.EqualFold`. This can be used
+ // to implement case-sensitive tag values, support snake casing, etc.
+ MatchName func(mapKey, fieldName string) bool
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+ config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+ // Keys are the keys of the structure which were successfully decoded
+ Keys []string
+
+ // Unused is a slice of keys that were found in the raw value but
+ // weren't decoded since there was no matching field in the result interface
+ Unused []string
+
+ // Unset is a slice of field names that were found in the result interface
+ // but weren't set in the decoding process since there was no matching value
+ // in the input
+ Unset []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+ config := &DecoderConfig{
+ Metadata: metadata,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+ val := reflect.ValueOf(config.Result)
+ if val.Kind() != reflect.Ptr {
+ return nil, errors.New("result must be a pointer")
+ }
+
+ val = val.Elem()
+ if !val.CanAddr() {
+ return nil, errors.New("result must be addressable (a pointer)")
+ }
+
+ if config.Metadata != nil {
+ if config.Metadata.Keys == nil {
+ config.Metadata.Keys = make([]string, 0)
+ }
+
+ if config.Metadata.Unused == nil {
+ config.Metadata.Unused = make([]string, 0)
+ }
+
+ if config.Metadata.Unset == nil {
+ config.Metadata.Unset = make([]string, 0)
+ }
+ }
+
+ if config.TagName == "" {
+ config.TagName = "mapstructure"
+ }
+
+ if config.MatchName == nil {
+ config.MatchName = strings.EqualFold
+ }
+
+ result := &Decoder{
+ config: config,
+ }
+
+ return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+ return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+ var inputVal reflect.Value
+ if input != nil {
+ inputVal = reflect.ValueOf(input)
+
+ // We need to check here if input is a typed nil. Typed nils won't
+ // match the "input == nil" below so we check that here.
+ if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
+ input = nil
+ }
+ }
+
+ if input == nil {
+ // If the data is nil, then we don't set anything, unless ZeroFields is set
+ // to true.
+ if d.config.ZeroFields {
+ outVal.Set(reflect.Zero(outVal.Type()))
+
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ }
+ return nil
+ }
+
+ if !inputVal.IsValid() {
+ // If the input value is invalid, then we just set the value
+ // to be the zero value.
+ outVal.Set(reflect.Zero(outVal.Type()))
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+ return nil
+ }
+
+ if d.config.DecodeHook != nil {
+ // We have a DecodeHook, so let's pre-process the input.
+ var err error
+ input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
+ if err != nil {
+ return fmt.Errorf("error decoding '%s': %s", name, err)
+ }
+ }
+
+ var err error
+ outputKind := getKind(outVal)
+ addMetaKey := true
+ switch outputKind {
+ case reflect.Bool:
+ err = d.decodeBool(name, input, outVal)
+ case reflect.Interface:
+ err = d.decodeBasic(name, input, outVal)
+ case reflect.String:
+ err = d.decodeString(name, input, outVal)
+ case reflect.Int:
+ err = d.decodeInt(name, input, outVal)
+ case reflect.Uint:
+ err = d.decodeUint(name, input, outVal)
+ case reflect.Float32:
+ err = d.decodeFloat(name, input, outVal)
+ case reflect.Struct:
+ err = d.decodeStruct(name, input, outVal)
+ case reflect.Map:
+ err = d.decodeMap(name, input, outVal)
+ case reflect.Ptr:
+ addMetaKey, err = d.decodePtr(name, input, outVal)
+ case reflect.Slice:
+ err = d.decodeSlice(name, input, outVal)
+ case reflect.Array:
+ err = d.decodeArray(name, input, outVal)
+ case reflect.Func:
+ err = d.decodeFunc(name, input, outVal)
+ default:
+ // If we reached this point then we weren't able to decode it
+ return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+ }
+
+ // If we reached here, then we successfully decoded SOMETHING, so
+ // mark the key as used if we're tracking metainput.
+ if addMetaKey && d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+
+ return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+ if val.IsValid() && val.Elem().IsValid() {
+ elem := val.Elem()
+
+ // If we can't address this element, then its not writable. Instead,
+ // we make a copy of the value (which is a pointer and therefore
+ // writable), decode into that, and replace the whole value.
+ copied := false
+ if !elem.CanAddr() {
+ copied = true
+
+ // Make *T
+ copy := reflect.New(elem.Type())
+
+ // *T = elem
+ copy.Elem().Set(elem)
+
+ // Set elem so we decode into it
+ elem = copy
+ }
+
+ // Decode. If we have an error then return. We also return right
+ // away if we're not a copy because that means we decoded directly.
+ if err := d.decode(name, data, elem); err != nil || !copied {
+ return err
+ }
+
+ // If we're a copy, we need to set the final result
+ val.Set(elem.Elem())
+ return nil
+ }
+
+ dataVal := reflect.ValueOf(data)
+
+ // If the input data is a pointer, and the assigned type is the dereference
+ // of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string
+ if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+ dataVal = reflect.Indirect(dataVal)
+ }
+
+ if !dataVal.IsValid() {
+ dataVal = reflect.Zero(val.Type())
+ }
+
+ dataValType := dataVal.Type()
+ if !dataValType.AssignableTo(val.Type()) {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got '%s'",
+ name, val.Type(), dataValType)
+ }
+
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ converted := true
+ switch {
+ case dataKind == reflect.String:
+ val.SetString(dataVal.String())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetString("1")
+ } else {
+ val.SetString("0")
+ }
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+ dataKind == reflect.Array && d.config.WeaklyTypedInput:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ switch elemKind {
+ case reflect.Uint8:
+ var uints []uint8
+ if dataKind == reflect.Array {
+ uints = make([]uint8, dataVal.Len(), dataVal.Len())
+ for i := range uints {
+ uints[i] = dataVal.Index(i).Interface().(uint8)
+ }
+ } else {
+ uints = dataVal.Interface().([]uint8)
+ }
+ val.SetString(string(uints))
+ default:
+ converted = false
+ }
+ default:
+ converted = false
+ }
+
+ if !converted {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetInt(dataVal.Int())
+ case dataKind == reflect.Uint:
+ val.SetInt(int64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetInt(int64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetInt(1)
+ } else {
+ val.SetInt(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseInt(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetInt(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Int64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetInt(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ i := dataVal.Int()
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
+ case dataKind == reflect.Uint:
+ val.SetUint(dataVal.Uint())
+ case dataKind == reflect.Float32:
+ f := dataVal.Float()
+ if f < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %f overflows uint",
+ name, f)
+ }
+ val.SetUint(uint64(f))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetUint(1)
+ } else {
+ val.SetUint(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ i, err := strconv.ParseUint(str, 0, val.Type().Bits())
+ if err == nil {
+ val.SetUint(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := strconv.ParseUint(string(jn), 0, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetUint(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Bool:
+ val.SetBool(dataVal.Bool())
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Int() != 0)
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Uint() != 0)
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Float() != 0)
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ b, err := strconv.ParseBool(dataVal.String())
+ if err == nil {
+ val.SetBool(b)
+ } else if dataVal.String() == "" {
+ val.SetBool(false)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetFloat(float64(dataVal.Int()))
+ case dataKind == reflect.Uint:
+ val.SetFloat(float64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetFloat(dataVal.Float())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetFloat(1)
+ } else {
+ val.SetFloat(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ str := dataVal.String()
+ if str == "" {
+ str = "0"
+ }
+
+ f, err := strconv.ParseFloat(str, val.Type().Bits())
+ if err == nil {
+ val.SetFloat(f)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Float64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetFloat(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // By default we overwrite keys in the current map
+ valMap := val
+
+ // If the map is nil or we're purposely zeroing fields, make a new map
+ if valMap.IsNil() || d.config.ZeroFields {
+ // Make a new map to hold our result
+ mapType := reflect.MapOf(valKeyType, valElemType)
+ valMap = reflect.MakeMap(mapType)
+ }
+
+ // Check input type and based on the input type jump to the proper func
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ switch dataVal.Kind() {
+ case reflect.Map:
+ return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+ case reflect.Struct:
+ return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+ case reflect.Array, reflect.Slice:
+ if d.config.WeaklyTypedInput {
+ return d.decodeMapFromSlice(name, dataVal, val, valMap)
+ }
+
+ fallthrough
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ // Special case for BC reasons (covered by tests)
+ if dataVal.Len() == 0 {
+ val.Set(valMap)
+ return nil
+ }
+
+ for i := 0; i < dataVal.Len(); i++ {
+ err := d.decode(
+ name+"["+strconv.Itoa(i)+"]",
+ dataVal.Index(i).Interface(), val)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // Accumulate errors
+ errors := make([]string, 0)
+
+ // If the input data is empty, then we just match what the input data is.
+ if dataVal.Len() == 0 {
+ if dataVal.IsNil() {
+ if !val.IsNil() {
+ val.Set(dataVal)
+ }
+ } else {
+ // Set to empty allocated value
+ val.Set(valMap)
+ }
+
+ return nil
+ }
+
+ for _, k := range dataVal.MapKeys() {
+ fieldName := name + "[" + k.String() + "]"
+
+ // First decode the key into the proper type
+ currentKey := reflect.Indirect(reflect.New(valKeyType))
+ if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ // Next decode the data into the proper type
+ v := dataVal.MapIndex(k).Interface()
+ currentVal := reflect.Indirect(reflect.New(valElemType))
+ if err := d.decode(fieldName, v, currentVal); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ valMap.SetMapIndex(currentKey, currentVal)
+ }
+
+ // Set the built up map to the value
+ val.Set(valMap)
+
+ // If we had errors, return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+ typ := dataVal.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ // Get the StructField first since this is a cheap operation. If the
+ // field is unexported, then ignore it.
+ f := typ.Field(i)
+ if f.PkgPath != "" {
+ continue
+ }
+
+ // Next get the actual value of this field and verify it is assignable
+ // to the map value.
+ v := dataVal.Field(i)
+ if !v.Type().AssignableTo(valMap.Type().Elem()) {
+ return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+ }
+
+ tagValue := f.Tag.Get(d.config.TagName)
+ keyName := f.Name
+
+ if tagValue == "" && d.config.IgnoreUntaggedFields {
+ continue
+ }
+
+ // If Squash is set in the config, we squash the field down.
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+
+ v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
+
+ // Determine the name of the key in the map
+ if index := strings.Index(tagValue, ","); index != -1 {
+ if tagValue[:index] == "-" {
+ continue
+ }
+ // If "omitempty" is specified in the tag, it ignores empty values.
+ if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+ continue
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
+ if squash {
+ // When squashing, the embedded type can be a pointer to a struct.
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+ v = v.Elem()
+ }
+
+ // The final type must be a struct
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ }
+ }
+ if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+ keyName = keyNameTagValue
+ }
+ } else if len(tagValue) > 0 {
+ if tagValue == "-" {
+ continue
+ }
+ keyName = tagValue
+ }
+
+ switch v.Kind() {
+ // this is an embedded struct, so handle it differently
+ case reflect.Struct:
+ x := reflect.New(v.Type())
+ x.Elem().Set(v)
+
+ vType := valMap.Type()
+ vKeyType := vType.Key()
+ vElemType := vType.Elem()
+ mType := reflect.MapOf(vKeyType, vElemType)
+ vMap := reflect.MakeMap(mType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+ // whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(vMap.Type())
+ reflect.Indirect(addrVal).Set(vMap)
+
+ err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+ if err != nil {
+ return err
+ }
+
+ // the underlying map may have been completely overwritten so pull
+ // it indirectly out of the enclosing value.
+ vMap = reflect.Indirect(addrVal)
+
+ if squash {
+ for _, k := range vMap.MapKeys() {
+ valMap.SetMapIndex(k, vMap.MapIndex(k))
+ }
+ } else {
+ valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+ }
+
+ default:
+ valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+ }
+ }
+
+ if val.CanAddr() {
+ val.Set(valMap)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
+ // If the input data is nil, then we want to just set the output
+ // pointer to be nil as well.
+ isNil := data == nil
+ if !isNil {
+ switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+ case reflect.Chan,
+ reflect.Func,
+ reflect.Interface,
+ reflect.Map,
+ reflect.Ptr,
+ reflect.Slice:
+ isNil = v.IsNil()
+ }
+ }
+ if isNil {
+ if !val.IsNil() && val.CanSet() {
+ nilValue := reflect.New(val.Type()).Elem()
+ val.Set(nilValue)
+ }
+
+ return true, nil
+ }
+
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ valType := val.Type()
+ valElemType := valType.Elem()
+ if val.CanSet() {
+ realVal := val
+ if realVal.IsNil() || d.config.ZeroFields {
+ realVal = reflect.New(valElemType)
+ }
+
+ if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+ return false, err
+ }
+
+ val.Set(realVal)
+ } else {
+ if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+ // Functions can't be converted, so the data must already have the
+ // exact same type as the target value; set it directly.
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if val.Type() != dataVal.Type() {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+ name, val.Type(), dataVal.Type(), data)
+ }
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ sliceType := reflect.SliceOf(valElemType)
+
+ // If we have a non array/slice type then we first attempt to convert.
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Slice and array we use the normal logic
+ case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+ break
+
+ // Empty maps turn into empty slices
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ }
+ // Create slice of maps of other sizes
+ return d.decodeSlice(name, []interface{}{data}, val)
+
+ case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+ return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+ // All other types we try to convert to the slice type
+ // and "lift" it into it. i.e. a string becomes a string slice.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeSlice(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
+
+ // If the input value is nil, then don't allocate since empty != nil
+ if dataValKind != reflect.Array && dataVal.IsNil() {
+ return nil
+ }
+
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ for valSlice.Len() <= i {
+ valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+ }
+ currentField := valSlice.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the slice we built up
+ val.Set(valSlice)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+ valArray := val
+
+ if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty arrays
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.Zero(arrayType))
+ return nil
+ }
+
+ // All other types we try to convert to the array type
+ // and "lift" it into it. i.e. a string becomes a string array.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeArray(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+ }
+ if dataVal.Len() > arrayType.Len() {
+ return fmt.Errorf(
+ "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+
+ }
+
+ // Make a new array to hold our result, same size as the original data.
+ valArray = reflect.New(arrayType).Elem()
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ currentField := valArray.Index(i)
+
+ fieldName := name + "[" + strconv.Itoa(i) + "]"
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the array we built up
+ val.Set(valArray)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+ // If the type of the value to write to and the data match directly,
+ // then we just set it directly instead of recursing into the structure.
+ if dataVal.Type() == val.Type() {
+ val.Set(dataVal)
+ return nil
+ }
+
+ dataValKind := dataVal.Kind()
+ switch dataValKind {
+ case reflect.Map:
+ return d.decodeStructFromMap(name, dataVal, val)
+
+ case reflect.Struct:
+ // Not the most efficient way to do this but we can optimize later if
+ // we want to. To convert from struct to struct we go to map first
+ // as an intermediary.
+
+ // Make a new map to hold our result
+ mapType := reflect.TypeOf((map[string]interface{})(nil))
+ mval := reflect.MakeMap(mapType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+ // whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(mval.Type())
+
+ reflect.Indirect(addrVal).Set(mval)
+ if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
+ return err
+ }
+
+ result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
+ return result
+
+ default:
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+ dataValType := dataVal.Type()
+ if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+ return fmt.Errorf(
+ "'%s' needs a map with string keys, has '%s' keys",
+ name, dataValType.Key().Kind())
+ }
+
+ dataValKeys := make(map[reflect.Value]struct{})
+ dataValKeysUnused := make(map[interface{}]struct{})
+ for _, dataValKey := range dataVal.MapKeys() {
+ dataValKeys[dataValKey] = struct{}{}
+ dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+ }
+
+ targetValKeysUnused := make(map[interface{}]struct{})
+ errors := make([]string, 0)
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = val
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+
+ // remainField is set to a valid field set with the "remain" tag if
+ // we are keeping track of remaining values.
+ var remainField *field
+
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ fieldVal := structVal.Field(i)
+ if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
+ // Handle embedded struct pointers as embedded structs.
+ fieldVal = fieldVal.Elem()
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
+ remain := false
+
+ // We always parse the tags because we're looking for other tags too
+ tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+
+ if tag == "remain" {
+ remain = true
+ break
+ }
+ }
+
+ if squash {
+ if fieldVal.Kind() != reflect.Struct {
+ errors = appendErrors(errors,
+ fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
+ } else {
+ structs = append(structs, fieldVal)
+ }
+ continue
+ }
+
+ // Build our field
+ if remain {
+ remainField = &field{fieldType, fieldVal}
+ } else {
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, fieldVal})
+ }
+ }
+ }
+
+ // for fieldType, field := range fields {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(d.config.TagName)
+ tagValue = strings.SplitN(tagValue, ",", 2)[0]
+ if tagValue != "" {
+ fieldName = tagValue
+ }
+
+ rawMapKey := reflect.ValueOf(fieldName)
+ rawMapVal := dataVal.MapIndex(rawMapKey)
+ if !rawMapVal.IsValid() {
+ // Do a slower search by iterating over each key and
+ // doing a case-insensitive search.
+ for dataValKey := range dataValKeys {
+ mK, ok := dataValKey.Interface().(string)
+ if !ok {
+ // Not a string key
+ continue
+ }
+
+ if d.config.MatchName(mK, fieldName) {
+ rawMapKey = dataValKey
+ rawMapVal = dataVal.MapIndex(dataValKey)
+ break
+ }
+ }
+
+ if !rawMapVal.IsValid() {
+ // There was no matching key in the map for the value in
+ // the struct. Remember it for potential errors and metadata.
+ targetValKeysUnused[fieldName] = struct{}{}
+ continue
+ }
+ }
+
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
+ // If the name is an empty string, then we're at the root, and we
+ // don't dot-join the fields.
+ if name != "" {
+ fieldName = name + "." + fieldName
+ }
+
+ if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // If we have a "remain"-tagged field and we have unused keys then
+ // we put the unused keys directly into the remain field.
+ if remainField != nil && len(dataValKeysUnused) > 0 {
+ // Build a map of only the unused values
+ remain := map[interface{}]interface{}{}
+ for key := range dataValKeysUnused {
+ remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+ }
+
+ // Decode it as-if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil {
+ errors = appendErrors(errors, err)
+ }
+
+ // Set the unused-keys map to nil so that the ErrorUnused check
+ // below does not report these keys.
+ dataValKeysUnused = nil
+ }
+
+ if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+ keys := make([]string, 0, len(dataValKeysUnused))
+ for rawKey := range dataValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
+ keys := make([]string, 0, len(targetValKeysUnused))
+ for rawKey := range targetValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ // Add the unused keys to the list of unused keys if we're tracking metadata
+ if d.config.Metadata != nil {
+ for rawKey := range dataValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+ }
+ for rawKey := range targetValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = name + "." + key
+ }
+
+ d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
+ }
+ }
+
+ return nil
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch getKind(v) {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+ kind := val.Kind()
+
+ switch {
+ case kind >= reflect.Int && kind <= reflect.Int64:
+ return reflect.Int
+ case kind >= reflect.Uint && kind <= reflect.Uint64:
+ return reflect.Uint
+ case kind >= reflect.Float32 && kind <= reflect.Float64:
+ return reflect.Float32
+ default:
+ return kind
+ }
+}
+
+func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
+ for i := 0; i < typ.NumField(); i++ {
+ f := typ.Field(i)
+ if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
+ return true
+ }
+ if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
+ return true
+ }
+ }
+ return false
+}
+
+func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return v
+ }
+ deref := v.Elem()
+ derefT := deref.Type()
+ if isStructTypeConvertibleToMap(derefT, true, tagName) {
+ return deref
+ }
+ return v
+}
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/decode.go b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f841d632..f4fc88455221 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ *v = *fam
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/fuzz.go b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/fuzz.go
index f819e4f8b549..dfac962a4e7e 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -21,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 9d94ae9effe3..21cdddcf0541 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -46,20 +46,20 @@ import (
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
+// line. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - No support for the following (optional) features: `# UNIT` line, `_created`
+// line, info type, stateset type, gaugehistogram type.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_create.go b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b06547..2946b8f1a644 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
@@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_parse.go b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 84be0643ec67..ac2482782c7b 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if p.err == io.EOF {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
index 26e92288c7c0..a21b9d15dd89 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
*/
package goautoneg
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go
index c909b8aa8c50..5727452c1ee9 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, errors.New("empty duration string")
}
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
-
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
-
- return Duration(dur), overflowErr
+ return Duration(dur), nil
}
func (d Duration) String() string {
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/value.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1a2831..9eb440413fd3 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/common/model/value.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value.go
@@ -16,20 +16,12 @@ package model
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
@@ -38,82 +30,14 @@ var (
ZeroSample = Sample{Timestamp: Earliest}
)
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +74,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
@@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_float.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 000000000000..0f615a705301
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_histogram.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 000000000000..54bb038cfff3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil; it's only stored as a pointer for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_type.go b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 000000000000..726c50ee638c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
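
`ValueType` round-trips through JSON as its string form. A short usage sketch, written against the canonical `github.com/prometheus/common/model` import path rather than the mirrored vendor path used in this tree:

```go
// Usage sketch: ValueType marshals to/from strings such as "vector" and "matrix".
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	b, _ := json.Marshal(model.ValMatrix)
	fmt.Println(string(b)) // "matrix"

	var vt model.ValueType
	_ = json.Unmarshal([]byte(`"vector"`), &vt)
	fmt.Println(vt == model.ValVector) // true
}
```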
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common
index 6c8e3e219797..e358db69c5d3 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/Makefile.common
@@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.14.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
+GOLANGCI_LINT_VERSION ?= v1.49.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
- ifeq (,$(CIRCLE_JOB))
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da159..06968ca2ed40 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
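
The loongarch parser above takes the vendor from the leading "system type" line and starts a new CPU entry at each "processor" line. A self-contained sketch of that approach; the sample cpuinfo text is illustrative only, not captured from a real machine:

```go
// Sketch of the "system type" + per-"processor" parsing used by parseCPUInfoLoong.
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

type cpu struct {
	Processor uint
	VendorID  string
	ModelName string
}

func parse(info string) ([]cpu, error) {
	s := bufio.NewScanner(strings.NewReader(info))
	var cpus []cpu
	var system string
	i := -1
	for s.Scan() {
		line := s.Text()
		if !strings.Contains(line, ":") {
			continue // skip blank separators
		}
		field := strings.SplitN(line, ": ", 2)
		switch strings.TrimSpace(field[0]) {
		case "system type":
			system = field[1]
		case "processor":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpus = append(cpus, cpu{Processor: uint(v), VendorID: system})
			i = len(cpus) - 1
		case "Model Name":
			if i >= 0 {
				cpus[i].ModelName = field[1]
			}
		}
	}
	return cpus, s.Err()
}

func main() {
	sample := "system type\t\t: generic-loongson-machine\n\n" +
		"processor\t\t: 0\nModel Name\t\t: Loongson-3A5000\n\n" +
		"processor\t\t: 1\nModel Name\t\t: Loongson-3A5000\n"
	cpus, err := parse(sample)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cpus)
}
```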
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 000000000000..d88442f0edfd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca1e2..a6b2b3127cb1 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/doc.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/doc.go
index d31a82600f67..f9d961e44179 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/doc.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828bb1da7..0c482c18ccfe 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/mountstats.go
@@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_softnet.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_softnet.go
index a94f86dc4ae6..06b7b8f21638 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,8 +27,9 @@ import (
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32
// Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock during transmission.
+ CPUCollision uint32
+ // Number of times the CPU has been woken up to process packets via RPS (received_rps).
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // Width is the number of columns present in this CPU's softnet_stat row.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
}
return stats, nil
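
The rewritten parser reads `/proc/net/softnet_stat` rows as hex columns and only decodes the column groups that the running kernel actually emits, keyed off the row width. A minimal sketch of that width-dependent parsing; the sample row is fabricated:

```go
// Sketch: decode hex columns from a softnet_stat row, reading newer column
// groups only when the row is wide enough, as in parseSoftnet above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type softnet struct {
	Processed, Dropped, TimeSqueezed uint32
	ReceivedRps, FlowLimitCount      uint32
	Width                            int
}

func parseRow(row string) (softnet, error) {
	cols := strings.Fields(row)
	st := softnet{Width: len(cols)}
	hex := func(s string) (uint32, error) {
		v, err := strconv.ParseUint(s, 16, 32)
		return uint32(v), err
	}
	var err error
	if len(cols) >= 9 { // Linux 2.6.23 layout
		if st.Processed, err = hex(cols[0]); err != nil {
			return st, err
		}
		if st.Dropped, err = hex(cols[1]); err != nil {
			return st, err
		}
		if st.TimeSqueezed, err = hex(cols[2]); err != nil {
			return st, err
		}
	}
	if len(cols) >= 10 { // Linux 2.6.39 adds received_rps
		if st.ReceivedRps, err = hex(cols[9]); err != nil {
			return st, err
		}
	}
	if len(cols) >= 11 { // Linux 4.18 adds flow_limit_count
		if st.FlowLimitCount, err = hex(cols[10]); err != nil {
			return st, err
		}
	}
	return st, nil
}

func main() {
	row := "0000272d 00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000"
	st, err := parseRow(row)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", st)
}
```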
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/netstat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/netstat.go
index dcea9c5a671f..5cc40aef55bf 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/netstat.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/netstat.go
@@ -15,6 +15,7 @@ package procfs
import (
"bufio"
+ "io"
"os"
"path/filepath"
"strconv"
@@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) {
return nil, err
}
- netStatFile := NetStat{
- Filename: filepath.Base(filePath),
- Stats: make(map[string][]uint64),
+ procNetstat, err := parseNetstat(file)
+ if err != nil {
+ return nil, err
+ }
+ procNetstat.Filename = filepath.Base(filePath)
+
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(r io.Reader) (NetStat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ netStat = NetStat{
+ Stats: make(map[string][]uint64),
}
- scanner := bufio.NewScanner(file)
- scanner.Scan()
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
+ )
+
+ scanner.Scan()
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return nil, err
- }
- netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
}
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
- netStatsTotal = append(netStatsTotal, netStatFile)
}
- return netStatsTotal, nil
+
+ return netStat, nil
}
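
Extracting `parseNetstat` makes the header/row layout of the `/proc/net/stat/` files testable in isolation: the first line names the counters, each following line is one CPU's hex values, and the result keys per-CPU slices by header. A self-contained sketch with made-up sample data:

```go
// Sketch of the header-then-per-CPU-rows parsing factored out as parseNetstat above.
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func parseStat(text string) (map[string][]uint64, error) {
	s := bufio.NewScanner(strings.NewReader(text))
	if !s.Scan() {
		return nil, fmt.Errorf("empty input")
	}
	headers := strings.Fields(s.Text()) // first line names the counters
	stats := make(map[string][]uint64, len(headers))
	for s.Scan() { // each remaining line holds one CPU's hex counters
		for i, col := range strings.Fields(s.Text()) {
			v, err := strconv.ParseUint(col, 16, 64)
			if err != nil {
				return nil, err
			}
			stats[headers[i]] = append(stats[headers[i]], v)
		}
	}
	return stats, s.Err()
}

func main() {
	sample := "entries searched found\n000000a0 00000000 0000000f\n000000a0 00000002 00000003\n"
	stats, err := parseStat(sample)
	if err != nil {
		panic(err)
	}
	fmt.Println(stats["searched"]) // per-CPU values for the "searched" counter
}
```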
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_cgroup.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_cgroup.go
index cca03327c3fe..ea83a75ffc42 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_interrupts.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 000000000000..9df79c237999
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
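
The per-line logic above strips the trailing `:` from the IRQ name, takes the next `cpuNum` fields as per-CPU counts, and treats the remainder as chip name plus device list only for numeric IRQs. A standalone sketch of that logic; the sample lines are invented, not read from a live system:

```go
// Sketch of the per-line handling in parseInterrupts above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type irq struct {
	Info    string
	Devices string
	Values  []string
}

func parseLine(line string, cpuNum int) (string, irq) {
	parts := strings.Fields(line)
	name := strings.TrimSuffix(parts[0], ":")
	in := irq{Values: parts[1 : cpuNum+1]}
	if _, err := strconv.Atoi(name); err == nil {
		// numeric IRQ: interrupt controller name, then the device list
		in.Info = parts[cpuNum+1]
		in.Devices = strings.Join(parts[cpuNum+2:], " ")
	} else {
		// named line (NMI, LOC, ...): everything after the counts is a description
		in.Info = strings.Join(parts[cpuNum+1:], " ")
	}
	return name, in
}

func main() {
	const cpuNum = 2
	name, in := parseLine("  9:      44907      19417   IO-APIC    9-fasteoi   acpi", cpuNum)
	fmt.Println(name, in.Info, in.Devices, in.Values)

	name, in = parseLine("NMI:          0          0   Non-maskable interrupts", cpuNum)
	fmt.Println(name, in.Info, in.Values)
}
```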
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_netstat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_netstat.go
index 48b5238194e8..6a43bb245951 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -33,139 +33,140 @@ type ProcNetstat struct {
}
type TcpExt struct { // nolint:revive
- SyncookiesSent float64
- SyncookiesRecv float64
- SyncookiesFailed float64
- EmbryonicRsts float64
- PruneCalled float64
- RcvPruned float64
- OfoPruned float64
- OutOfWindowIcmps float64
- LockDroppedIcmps float64
- ArpFilter float64
- TW float64
- TWRecycled float64
- TWKilled float64
- PAWSActive float64
- PAWSEstab float64
- DelayedACKs float64
- DelayedACKLocked float64
- DelayedACKLost float64
- ListenOverflows float64
- ListenDrops float64
- TCPHPHits float64
- TCPPureAcks float64
- TCPHPAcks float64
- TCPRenoRecovery float64
- TCPSackRecovery float64
- TCPSACKReneging float64
- TCPSACKReorder float64
- TCPRenoReorder float64
- TCPTSReorder float64
- TCPFullUndo float64
- TCPPartialUndo float64
- TCPDSACKUndo float64
- TCPLossUndo float64
- TCPLostRetransmit float64
- TCPRenoFailures float64
- TCPSackFailures float64
- TCPLossFailures float64
- TCPFastRetrans float64
- TCPSlowStartRetrans float64
- TCPTimeouts float64
- TCPLossProbes float64
- TCPLossProbeRecovery float64
- TCPRenoRecoveryFail float64
- TCPSackRecoveryFail float64
- TCPRcvCollapsed float64
- TCPDSACKOldSent float64
- TCPDSACKOfoSent float64
- TCPDSACKRecv float64
- TCPDSACKOfoRecv float64
- TCPAbortOnData float64
- TCPAbortOnClose float64
- TCPAbortOnMemory float64
- TCPAbortOnTimeout float64
- TCPAbortOnLinger float64
- TCPAbortFailed float64
- TCPMemoryPressures float64
- TCPMemoryPressuresChrono float64
- TCPSACKDiscard float64
- TCPDSACKIgnoredOld float64
- TCPDSACKIgnoredNoUndo float64
- TCPSpuriousRTOs float64
- TCPMD5NotFound float64
- TCPMD5Unexpected float64
- TCPMD5Failure float64
- TCPSackShifted float64
- TCPSackMerged float64
- TCPSackShiftFallback float64
- TCPBacklogDrop float64
- PFMemallocDrop float64
- TCPMinTTLDrop float64
- TCPDeferAcceptDrop float64
- IPReversePathFilter float64
- TCPTimeWaitOverflow float64
- TCPReqQFullDoCookies float64
- TCPReqQFullDrop float64
- TCPRetransFail float64
- TCPRcvCoalesce float64
- TCPOFOQueue float64
- TCPOFODrop float64
- TCPOFOMerge float64
- TCPChallengeACK float64
- TCPSYNChallenge float64
- TCPFastOpenActive float64
- TCPFastOpenActiveFail float64
- TCPFastOpenPassive float64
- TCPFastOpenPassiveFail float64
- TCPFastOpenListenOverflow float64
- TCPFastOpenCookieReqd float64
- TCPFastOpenBlackhole float64
- TCPSpuriousRtxHostQueues float64
- BusyPollRxPackets float64
- TCPAutoCorking float64
- TCPFromZeroWindowAdv float64
- TCPToZeroWindowAdv float64
- TCPWantZeroWindowAdv float64
- TCPSynRetrans float64
- TCPOrigDataSent float64
- TCPHystartTrainDetect float64
- TCPHystartTrainCwnd float64
- TCPHystartDelayDetect float64
- TCPHystartDelayCwnd float64
- TCPACKSkippedSynRecv float64
- TCPACKSkippedPAWS float64
- TCPACKSkippedSeq float64
- TCPACKSkippedFinWait2 float64
- TCPACKSkippedTimeWait float64
- TCPACKSkippedChallenge float64
- TCPWinProbe float64
- TCPKeepAlive float64
- TCPMTUPFail float64
- TCPMTUPSuccess float64
- TCPWqueueTooBig float64
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
}
type IpExt struct { // nolint:revive
- InNoRoutes float64
- InTruncatedPkts float64
- InMcastPkts float64
- OutMcastPkts float64
- InBcastPkts float64
- OutBcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InCsumErrors float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
- ReasmOverlaps float64
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
}
func (p Proc) Netstat() (ProcNetstat, error) {
@@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil {
return ProcNetstat{PID: p.PID}, err
}
- procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID
return procNetstat, err
}
-// parseNetstat parses the metrics from proc//net/netstat file
+// parseProcNetstat parses the metrics from proc//net/netstat file
// and returns a ProcNetstat structure.
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var (
scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{}
@@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = value
+ procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = value
+ procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = value
+ procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = value
+ procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = value
+ procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = value
+ procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = value
+ procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = value
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = value
+ procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = value
+ procNetstat.TcpExt.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = value
+ procNetstat.TcpExt.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = value
+ procNetstat.TcpExt.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = value
+ procNetstat.TcpExt.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = value
+ procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = value
+ procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = value
+ procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = value
+ procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = value
+ procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = value
+ procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = value
+ procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = value
+ procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = value
+ procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = value
+ procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = value
+ procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = value
+ procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = value
+ procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = value
+ procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = value
+ procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = value
+ procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = value
+ procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = value
+ procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = value
+ procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = value
+ procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = value
+ procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = value
+ procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = value
+ procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = value
+ procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = value
+ procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = value
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = value
+ procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = value
+ procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = value
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = value
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = value
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = value
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = value
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = value
+ procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = value
+ procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = value
+ procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = value
+ procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = value
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = value
+ procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = value
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = value
+ procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = value
+ procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = value
+ procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = value
+ procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = value
+ procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = value
+ procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = value
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = value
+ procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = value
+ procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = value
+ procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = value
+ procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = value
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = value
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = value
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = value
+ procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = value
+ procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = value
+ procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = value
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = value
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = value
+ procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = value
+ procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = value
+ procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = value
+ procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = value
+ procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = value
+ procNetstat.IpExt.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = value
+ procNetstat.IpExt.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = value
+ procNetstat.IpExt.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = value
+ procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = value
+ procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = value
+ procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = value
+ procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = value
+ procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = value
+ procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = value
+ procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = value
+ procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = value
+ procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = value
+ procNetstat.IpExt.ReasmOverlaps = &value
}
}
}
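
Switching the `TcpExt`/`IpExt` counters to `*float64` lets callers tell a counter the kernel never reported (nil) apart from a genuine zero. A usage sketch, assuming the procfs release vendored here (which ships these pointer fields and `TCPRcvQDrop`) and the canonical `github.com/prometheus/procfs` import path:

```go
// Usage sketch: export only the netstat counters the running kernel reports.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatalf("could not get process: %s", err)
	}
	stat, err := p.Netstat()
	if err != nil {
		log.Fatalf("could not read net/netstat: %s", err)
	}
	// nil means the kernel did not expose this counter at all; zero is a real value.
	if v := stat.TcpExt.TCPRcvQDrop; v != nil {
		fmt.Printf("TCPRcvQDrop: %f\n", *v)
	} else {
		fmt.Println("TCPRcvQDrop not reported by this kernel")
	}
}
```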
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp.go
index ae191896cbd7..6c46b718849c 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -37,100 +37,100 @@ type ProcSnmp struct {
}
type Ip struct { // nolint:revive
- Forwarding float64
- DefaultTTL float64
- InReceives float64
- InHdrErrors float64
- InAddrErrors float64
- ForwDatagrams float64
- InUnknownProtos float64
- InDiscards float64
- InDelivers float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
}
-type Icmp struct {
- InMsgs float64
- InErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InTimeExcds float64
- InParmProbs float64
- InSrcQuenchs float64
- InRedirects float64
- InEchos float64
- InEchoReps float64
- InTimestamps float64
- InTimestampReps float64
- InAddrMasks float64
- InAddrMaskReps float64
- OutMsgs float64
- OutErrors float64
- OutDestUnreachs float64
- OutTimeExcds float64
- OutParmProbs float64
- OutSrcQuenchs float64
- OutRedirects float64
- OutEchos float64
- OutEchoReps float64
- OutTimestamps float64
- OutTimestampReps float64
- OutAddrMasks float64
- OutAddrMaskReps float64
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
}
type IcmpMsg struct {
- InType3 float64
- OutType3 float64
+ InType3 *float64
+ OutType3 *float64
}
type Tcp struct { // nolint:revive
- RtoAlgorithm float64
- RtoMin float64
- RtoMax float64
- MaxConn float64
- ActiveOpens float64
- PassiveOpens float64
- AttemptFails float64
- EstabResets float64
- CurrEstab float64
- InSegs float64
- OutSegs float64
- RetransSegs float64
- InErrs float64
- OutRsts float64
- InCsumErrors float64
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
}
type Udp struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
func (p Proc) Snmp() (ProcSnmp, error) {
@@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = value
+ procSnmp.Ip.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = value
+ procSnmp.Ip.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = value
+ procSnmp.Ip.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = value
+ procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = value
+ procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = value
+ procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = value
+ procSnmp.Ip.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = value
+ procSnmp.Ip.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = value
+ procSnmp.Ip.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = value
+ procSnmp.Ip.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = value
+ procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = value
+ procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = value
+ procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = value
+ procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = value
+ procSnmp.Ip.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = value
+ procSnmp.Ip.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = value
+ procSnmp.Ip.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = value
+ procSnmp.Ip.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = value
+ procSnmp.Ip.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = value
+ procSnmp.Icmp.InMsgs = &value
case "InErrors":
- procSnmp.Icmp.InErrors = value
+ procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = value
+ procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = value
+ procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = value
+ procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = value
+ procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = value
+ procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = value
+ procSnmp.Icmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = value
+ procSnmp.Icmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = value
+ procSnmp.Icmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = value
+ procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = value
+ procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = value
+ procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = value
+ procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = value
+ procSnmp.Icmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = value
+ procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = value
+ procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = value
+ procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = value
+ procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = value
+ procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = value
+ procSnmp.Icmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = value
+ procSnmp.Icmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = value
+ procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = value
+ procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = value
+ procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = value
+ procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = value
+ procSnmp.Icmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = value
+ procSnmp.IcmpMsg.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = value
+ procSnmp.IcmpMsg.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = value
+ procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = value
+ procSnmp.Tcp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = value
+ procSnmp.Tcp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = value
+ procSnmp.Tcp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = value
+ procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = value
+ procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = value
+ procSnmp.Tcp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = value
+ procSnmp.Tcp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = value
+ procSnmp.Tcp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = value
+ procSnmp.Tcp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = value
+ procSnmp.Tcp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = value
+ procSnmp.Tcp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = value
+ procSnmp.Tcp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = value
+ procSnmp.Tcp.OutRsts = &value
case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = value
+ procSnmp.Tcp.InCsumErrors = &value
}
case "Udp":
switch key {
case "InDatagrams":
- procSnmp.Udp.InDatagrams = value
+ procSnmp.Udp.InDatagrams = &value
case "NoPorts":
- procSnmp.Udp.NoPorts = value
+ procSnmp.Udp.NoPorts = &value
case "InErrors":
- procSnmp.Udp.InErrors = value
+ procSnmp.Udp.InErrors = &value
case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = value
+ procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = value
+ procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = value
+ procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = value
+ procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = value
+ procSnmp.Udp.IgnoredMulti = &value
}
case "UdpLite":
switch key {
case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = value
+ procSnmp.UdpLite.InDatagrams = &value
case "NoPorts":
- procSnmp.UdpLite.NoPorts = value
+ procSnmp.UdpLite.NoPorts = &value
case "InErrors":
- procSnmp.UdpLite.InErrors = value
+ procSnmp.UdpLite.InErrors = &value
case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = value
+ procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = value
+ procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = value
+ procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = value
+ procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = value
+ procSnmp.UdpLite.IgnoredMulti = &value
}
}
}
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp6.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp6.go
index f611992d52ca..3059cc6a1367 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
}
type Ip6 struct { // nolint:revive
- InReceives float64
- InHdrErrors float64
- InTooBigErrors float64
- InNoRoutes float64
- InAddrErrors float64
- InUnknownProtos float64
- InTruncatedPkts float64
- InDiscards float64
- InDelivers float64
- OutForwDatagrams float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
- InMcastPkts float64
- OutMcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
}
type Icmp6 struct {
- InMsgs float64
- InErrors float64
- OutMsgs float64
- OutErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InPktTooBigs float64
- InTimeExcds float64
- InParmProblems float64
- InEchos float64
- InEchoReplies float64
- InGroupMembQueries float64
- InGroupMembResponses float64
- InGroupMembReductions float64
- InRouterSolicits float64
- InRouterAdvertisements float64
- InNeighborSolicits float64
- InNeighborAdvertisements float64
- InRedirects float64
- InMLDv2Reports float64
- OutDestUnreachs float64
- OutPktTooBigs float64
- OutTimeExcds float64
- OutParmProblems float64
- OutEchos float64
- OutEchoReplies float64
- OutGroupMembQueries float64
- OutGroupMembResponses float64
- OutGroupMembReductions float64
- OutRouterSolicits float64
- OutRouterAdvertisements float64
- OutNeighborSolicits float64
- OutNeighborAdvertisements float64
- OutRedirects float64
- OutMLDv2Reports float64
- InType1 float64
- InType134 float64
- InType135 float64
- InType136 float64
- InType143 float64
- OutType133 float64
- OutType135 float64
- OutType136 float64
- OutType143 float64
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
}
type Udp6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
}
func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = value
+ procSnmp6.Ip6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = value
+ procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = value
+ procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = value
+ procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = value
+ procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = value
+ procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = value
+ procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = value
+ procSnmp6.Ip6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = value
+ procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = value
+ procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = value
+ procSnmp6.Ip6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = value
+ procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = value
+ procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = value
+ procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = value
+ procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = value
+ procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = value
+ procSnmp6.Ip6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = value
+ procSnmp6.Ip6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = value
+ procSnmp6.Ip6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = value
+ procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = value
+ procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = value
+ procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = value
+ procSnmp6.Ip6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = value
+ procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = value
+ procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = value
+ procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = value
+ procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = value
+ procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = value
+ procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = value
+ procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = value
+ procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = value
+ procSnmp6.Ip6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = value
+ procSnmp6.Icmp6.InMsgs = &value
case "InErrors":
- procSnmp6.Icmp6.InErrors = value
+ procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = value
+ procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = value
+ procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = value
+ procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = value
+ procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = value
+ procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = value
+ procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = value
+ procSnmp6.Icmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = value
+ procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = value
+ procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = value
+ procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = value
+ procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = value
+ procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = value
+ procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = value
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = value
+ procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = value
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = value
+ procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = value
+ procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = value
+ procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = value
+ procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = value
+ procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = value
+ procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = value
+ procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = value
+ procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = value
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = value
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = value
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = value
+ procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = value
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = value
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = value
+ procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = value
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = value
+ procSnmp6.Icmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = value
+ procSnmp6.Icmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = value
+ procSnmp6.Icmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = value
+ procSnmp6.Icmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = value
+ procSnmp6.Icmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = value
+ procSnmp6.Icmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = value
+ procSnmp6.Icmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = value
+ procSnmp6.Icmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = value
+ procSnmp6.Icmp6.OutType143 = &value
}
case "Udp6":
switch key {
case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = value
+ procSnmp6.Udp6.InDatagrams = &value
case "NoPorts":
- procSnmp6.Udp6.NoPorts = value
+ procSnmp6.Udp6.NoPorts = &value
case "InErrors":
- procSnmp6.Udp6.InErrors = value
+ procSnmp6.Udp6.InErrors = &value
case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = value
+ procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = value
+ procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = value
+ procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = value
+ procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = value
+ procSnmp6.Udp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = value
+ procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = value
+ procSnmp6.UdpLite6.NoPorts = &value
case "InErrors":
- procSnmp6.UdpLite6.InErrors = value
+ procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = value
+ procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = value
+ procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = value
+ procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = value
+ procSnmp6.UdpLite6.InCsumErrors = &value
}
}
}
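
The Snmp6/Icmp6/Udp6/UdpLite6 counters above move from float64 to *float64 so a metric the kernel does not expose can be told apart from one that was reported as zero. A minimal consumer-side sketch under that assumption (standard procfs import path; the nil check is the point):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}
	snmp6, err := p.Snmp6()
	if err != nil {
		panic(err)
	}
	// After this change the fields are pointers: nil means the counter was
	// absent from /proc/[pid]/net/snmp6, a non-nil zero means it was reported as 0.
	if v := snmp6.Udp6.InDatagrams; v != nil {
		fmt.Printf("udp6 InDatagrams: %.0f\n", *v)
	} else {
		fmt.Println("udp6 InDatagrams not reported by this kernel")
	}
}
```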
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go
index 06c556ef9623..b278eb2c2df7 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -102,6 +102,8 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
@@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) {
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
- &ignoreInt64,
+ &s.Processor,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
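
The new Processor field surfaces the "CPU last executed on" column of /proc/[pid]/stat, which the old scan discarded into ignoreInt64. A short usage sketch, assuming the standard procfs entry points:

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	st, err := p.Stat()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Processor is the new field populated from the stat scan above.
	fmt.Printf("pid %d last ran on CPU %d\n", st.PID, st.Processor)
}
```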
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go
index 594022ded48a..3d8c06439a93 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/proc_status.go
@@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
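
The switch from bytes.Trim to strings.TrimSuffix matters because bytes.Trim treats " kB" as a set of runes and strips any of ' ', 'k', 'B' from both ends of the value, while TrimSuffix removes only the literal unit suffix. An illustrative stdlib-only sketch of the difference:

```go
// Illustrative only: why TrimSuffix is the safer way to strip the "kB" unit.
package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	v := "1024 kB"
	fmt.Println(strings.TrimSuffix(v, " kB"))         // "1024"
	fmt.Println(string(bytes.Trim([]byte(v), " kB"))) // also "1024" here, but...

	// bytes.Trim uses " kB" as a *set* of runes, so a status value that merely
	// starts or ends with one of those runes gets mangled:
	name := "Batch"
	fmt.Println(string(bytes.Trim([]byte(name), " kB"))) // "atch"
	fmt.Println(strings.TrimSuffix(name, " kB"))         // "Batch" (unchanged)
}
```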
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go
index 33f97caa08da..586af48af9f6 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/stat.go
@@ -62,7 +62,7 @@ type Stat struct {
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) {
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
- scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) {
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
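
Stat.CPU becoming map[int64]CPUStat means callers get entries keyed by CPU id rather than a dense, zero-padded slice, so offline or non-contiguous CPUs no longer produce placeholder entries. A consumer-side sketch; map iteration order is undefined, so sort the ids if stable output matters:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	st, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	ids := make([]int64, 0, len(st.CPU))
	for id := range st.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		c := st.CPU[id]
		fmt.Printf("cpu%d user=%.2f system=%.2f idle=%.2f\n", id, c.User, c.System, c.Idle)
	}
}
```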
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/thread.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 000000000000..f08bfc769db1
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := fsi.FS(proc.path("task"))
+ if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
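
The new thread.go reuses the existing Proc parsers for /proc/PID/task/TID entries, since they share the /proc/PID layout. A usage sketch built on the AllThreads helper added above:

```go
// Sketch: enumerate the threads of the current process and read each one's
// stat through the same Proc API.
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	pid := os.Getpid()
	threads, err := procfs.AllThreads(pid)
	if err != nil {
		panic(err)
	}
	for _, t := range threads {
		st, err := t.Stat()
		if err != nil {
			continue // a thread may have exited between listing and reading
		}
		fmt.Printf("tid %d state %s last CPU %d\n", t.PID, st.State, st.Processor)
	}
}
```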
diff --git a/cluster-autoscaler/vendor/github.com/prometheus/procfs/vm.go b/cluster-autoscaler/vendor/github.com/prometheus/procfs/vm.go
index 20ceb77e2df7..cdedcae996d8 100644
--- a/cluster-autoscaler/vendor/github.com/prometheus/procfs/vm.go
+++ b/cluster-autoscaler/vendor/github.com/prometheus/procfs/vm.go
@@ -26,7 +26,9 @@ import (
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/.golangci.yml b/cluster-autoscaler/vendor/github.com/spf13/cobra/.golangci.yml
index 439d3e1de4ec..2578d94b5ebd 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/.golangci.yml
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/.golangci.yml
@@ -1,4 +1,4 @@
-# Copyright 2013-2022 The Cobra Authors
+# Copyright 2013-2023 The Cobra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/Makefile b/cluster-autoscaler/vendor/github.com/spf13/cobra/Makefile
index c433a01bcedb..0da8d7aa08a9 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/Makefile
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/Makefile
@@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint))
$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
endif
-ifeq (, $(shell which richgo))
-$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest")
-endif
-
.PHONY: fmt lint test install_deps clean
default: all
@@ -25,6 +21,10 @@ lint:
test: install_deps
$(info ******************** running tests ********************)
+ go test -v ./...
+
+richtest: install_deps
+ $(info ******************** running tests with kyoh86/richgo ********************)
richgo test -v ./...
install_deps:
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/README.md b/cluster-autoscaler/vendor/github.com/spf13/cobra/README.md
index 7cc726beb42d..592c0b8ab05e 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/README.md
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/README.md
@@ -1,4 +1,4 @@
-![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+![cobra logo](assets/CobraMain.png)
Cobra is a library for creating powerful modern CLI applications.
@@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
-[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/active_help.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/active_help.go
index 95e03aecb603..2d0239437a8f 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/active_help.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/active_help.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/args.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/args.go
index 2c1f99e7870d..e79ec33a81d8 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/args.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/args.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import (
type PositionalArgs func(cmd *Command, args []string) error
-// Legacy arg validation has the following behaviour:
+// legacyArgs validation has the following behaviour:
// - root commands with no subcommands can take arbitrary arguments
// - root commands with subcommands will do subcommand validity checking
// - subcommands will always accept arbitrary arguments
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completions.go
index 3acdb27974e8..10c78847de23 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
}
}
-// Setup annotations for go completions for registered flags
+// prepareCustomAnnotationsForFlags sets up annotations for go completions for registered flags
func prepareCustomAnnotationsForFlags(cmd *Command) {
flagCompletionMutex.RLock()
defer flagCompletionMutex.RUnlock()
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completionsV2.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completionsV2.go
index bb4b71892c2a..19b09560c1e0 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completionsV2.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
__%[1]s_debug()
{
- if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
@@ -65,7 +65,7 @@ __%[1]s_get_completion_results() {
lastChar=${lastParam:$((${#lastParam}-1)):1}
__%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
- if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ if [[ -z ${cur} && ${lastChar} != = ]]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "Adding extra empty parameter"
@@ -75,7 +75,7 @@ __%[1]s_get_completion_results() {
# When completing a flag with an = (e.g., %[1]s -n=)
# bash focuses on the part after the =, so we need to remove
# the flag part from $cur
- if [[ "${cur}" == -*=* ]]; then
+ if [[ ${cur} == -*=* ]]; then
cur="${cur#*=}"
fi
@@ -87,7 +87,7 @@ __%[1]s_get_completion_results() {
directive=${out##*:}
# Remove the directive
out=${out%%:*}
- if [ "${directive}" = "${out}" ]; then
+ if [[ ${directive} == "${out}" ]]; then
# There is no directive specified
directive=0
fi
@@ -101,22 +101,36 @@ __%[1]s_process_completion_results() {
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
- if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ if (((directive & shellCompDirectiveError) != 0)); then
# Error code. No completion.
__%[1]s_debug "Received error from custom completion go code"
return
else
- if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
- if [[ $(type -t compopt) = "builtin" ]]; then
+ if (((directive & shellCompDirectiveNoSpace) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
__%[1]s_debug "Activating no space"
compopt -o nospace
else
__%[1]s_debug "No space directive not supported in this version of bash"
fi
fi
- if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
- if [[ $(type -t compopt) = "builtin" ]]; then
+ if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ # no sort isn't supported for bash versions below 4.4
+ if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ else
+ __%[1]s_debug "Activating keep order"
+ compopt -o nosort
+ fi
+ else
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
__%[1]s_debug "Activating no file completion"
compopt +o default
else
@@ -130,7 +144,7 @@ __%[1]s_process_completion_results() {
local activeHelp=()
__%[1]s_extract_activeHelp
- if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
# File extension filtering
local fullFilter filter filteringCmd
@@ -143,13 +157,12 @@ __%[1]s_process_completion_results() {
filteringCmd="_filedir $fullFilter"
__%[1]s_debug "File filtering command: $filteringCmd"
$filteringCmd
- elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
# File completion for directories only
- # Use printf to strip any trailing newline
local subdir
- subdir=$(printf "%%s" "${completions[0]}")
- if [ -n "$subdir" ]; then
+ subdir=${completions[0]}
+ if [[ -n $subdir ]]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
else
@@ -164,7 +177,7 @@ __%[1]s_process_completion_results() {
__%[1]s_handle_special_char "$cur" =
# Print the activeHelp statements before we finish
- if [ ${#activeHelp[*]} -ne 0 ]; then
+ if ((${#activeHelp[*]} != 0)); then
printf "\n";
printf "%%s\n" "${activeHelp[@]}"
printf "\n"
@@ -184,21 +197,21 @@ __%[1]s_process_completion_results() {
# Separate activeHelp lines from real completions.
# Fills the $activeHelp and $completions arrays.
__%[1]s_extract_activeHelp() {
- local activeHelpMarker="%[8]s"
+ local activeHelpMarker="%[9]s"
local endIndex=${#activeHelpMarker}
while IFS='' read -r comp; do
- if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+ if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
comp=${comp:endIndex}
__%[1]s_debug "ActiveHelp found: $comp"
- if [ -n "$comp" ]; then
+ if [[ -n $comp ]]; then
activeHelp+=("$comp")
fi
else
# Not an activeHelp line but a normal completion
completions+=("$comp")
fi
- done < <(printf "%%s\n" "${out}")
+ done <<<"${out}"
}
__%[1]s_handle_completion_types() {
@@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() {
done < <(printf "%%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
- if [ ${#COMPREPLY[*]} -eq 1 ]; then
+ if ((${#COMPREPLY[*]} == 1)); then
__%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
comp="${COMPREPLY[0]%%%%$tab*}"
__%[1]s_debug "Removed description from single completion, which is now: ${comp}"
@@ -271,8 +284,8 @@ __%[1]s_handle_special_char()
if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
local word=${comp%%"${comp##*${char}}"}
local idx=${#COMPREPLY[*]}
- while [[ $((--idx)) -ge 0 ]]; do
- COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
+ while ((--idx >= 0)); do
+ COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
done
fi
}
@@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions()
# Make sure we can fit a description of at least 8 characters
# if we are to align the descriptions.
- if [[ $maxdesclength -gt 8 ]]; then
+ if ((maxdesclength > 8)); then
# Add the proper number of spaces to align the descriptions
for ((i = ${#comp} ; i < longest ; i++)); do
comp+=" "
@@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions()
# If there is enough space for any description text,
# truncate the descriptions that are too long for the shell width
- if [ $maxdesclength -gt 0 ]; then
- if [ ${#desc} -gt $maxdesclength ]; then
+ if ((maxdesclength > 0)); then
+ if ((${#desc} > maxdesclength)); then
desc=${desc:0:$(( maxdesclength - 1 ))}
desc+="…"
fi
@@ -332,9 +345,9 @@ __start_%[1]s()
# Call _init_completion from the bash-completion package
# to prepare the arguments properly
if declare -F _init_completion >/dev/null 2>&1; then
- _init_completion -n "=:" || return
+ _init_completion -n =: || return
else
- __%[1]s_init_completion -n "=:" || return
+ __%[1]s_init_completion -n =: || return
fi
__%[1]s_debug
@@ -361,7 +374,7 @@ fi
# ex: ts=4 sw=4 et filetype=sh
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
activeHelpMarker))
}
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/cobra.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/cobra.go
index fe44bc8a07ec..b07b44a0ce22 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/cobra.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/cobra.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string {
// rpad adds padding to the right of a string.
func rpad(s string, padding int) string {
- template := fmt.Sprintf("%%-%ds", padding)
- return fmt.Sprintf(template, s)
+ formattedString := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(formattedString, s)
}
// tmpl executes the given template text on data, writing the result to w.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
index 6ff47dd5c35b..01f7c6f1c5e3 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/command.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
// FParseErrWhitelist configures Flag parse errors to be ignored
type FParseErrWhitelist flag.ParseErrorsWhitelist
-// Structure to manage groups for commands
+// Group is a structure to manage groups for commands
type Group struct {
ID string
Title string
@@ -47,7 +47,7 @@ type Group struct {
// definition to ensure usability.
type Command struct {
// Use is the one-line usage message.
- // Recommended syntax is as follow:
+ // Recommended syntax is as follows:
// [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
// ... indicates that you can specify multiple values for the previous argument.
// | indicates mutually exclusive information. You can use the argument to the left of the separator or the
@@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}
-// SetHelpCommandGroup sets the group id of the help command.
+// SetHelpCommandGroupID sets the group id of the help command.
func (c *Command) SetHelpCommandGroupID(groupID string) {
if c.helpCommand != nil {
c.helpCommand.GroupID = groupID
@@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) {
c.helpCommandGroupID = groupID
}
-// SetCompletionCommandGroup sets the group id of the completion command.
+// SetCompletionCommandGroupID sets the group id of the completion command.
func (c *Command) SetCompletionCommandGroupID(groupID string) {
// completionCommandGroupID is used if no completion command is defined by the user
c.Root().completionCommandGroupID = groupID
@@ -655,20 +655,44 @@ Loop:
// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
-func argsMinusFirstX(args []string, x string) []string {
- for i, y := range args {
- if x == y {
- ret := []string{}
- ret = append(ret, args[:i]...)
- ret = append(ret, args[i+1:]...)
- return ret
+// Special care needs to be taken not to remove a flag value.
+func (c *Command) argsMinusFirstX(args []string, x string) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+ flags := c.Flags()
+
+Loop:
+ for pos := 0; pos < len(args); pos++ {
+ s := args[pos]
+ switch {
+ case s == "--":
+ // -- means we have reached the end of the parseable args. Break out of the loop now.
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ fallthrough
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip
+ // over the next arg, because that is the value of this flag.
+ pos++
+ continue
+ case !strings.HasPrefix(s, "-"):
+ // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so,
+ // return the args, excluding the one at this position.
+ if s == x {
+ ret := []string{}
+ ret = append(ret, args[:pos]...)
+ ret = append(ret, args[pos+1:]...)
+ return ret
+ }
}
}
return args
}
func isFlagArg(arg string) bool {
- return ((len(arg) >= 3 && arg[1] == '-') ||
+ return ((len(arg) >= 3 && arg[0:2] == "--") ||
(len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
}
@@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) {
cmd := c.findNext(nextSubCmd)
if cmd != nil {
- return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd))
}
return c, innerArgs
}
@@ -1272,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool {
return true
}
-// ContainGroups return if groupID exists in the list of command groups.
+// ContainsGroup returns whether groupID exists in the list of command groups.
func (c *Command) ContainsGroup(groupID string) bool {
for _, x := range c.commandgroups {
if x.ID == groupID {
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/command_notwin.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/command_notwin.go
index 2b77f8f01909..307f0c127fd8 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/command_notwin.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/command_notwin.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/command_win.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/command_win.go
index 520f23abf09f..adbef395c255 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/command_win.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/command_win.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/completions.go
index e8a0206db104..ee38c4d0b864 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -77,6 +77,10 @@ const (
// obtain the same behavior but only for flags.
ShellCompDirectiveFilterDirs
+ // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+ // in which the completions are provided
+ ShellCompDirectiveKeepOrder
+
// ===========================================================================
// All directives using iota should be above this one.
@@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string {
if d&ShellCompDirectiveFilterDirs != 0 {
directives = append(directives, "ShellCompDirectiveFilterDirs")
}
+ if d&ShellCompDirectiveKeepOrder != 0 {
+ directives = append(directives, "ShellCompDirectiveKeepOrder")
+ }
if len(directives) == 0 {
directives = append(directives, "ShellCompDirectiveDefault")
}
@@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string {
return strings.Join(directives, ", ")
}
-// Adds a special hidden command that can be used to request custom completions.
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
func (c *Command) initCompleteCmd(args []string) {
completeCmd := &Command{
Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
@@ -727,7 +734,7 @@ to enable it. You can execute the following once:
To load completions in your current shell session:
- source <(%[1]s completion zsh); compdef _%[1]s %[1]s
+ source <(%[1]s completion zsh)
To load completions for every new session, execute once:
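
ShellCompDirectiveKeepOrder, added above, lets a ValidArgsFunction ask the shell to present completions in the order the program produced them rather than re-sorting. A minimal sketch of returning it (command and candidate names are illustrative):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}
	deploy := &cobra.Command{
		Use: "deploy [environment]",
		// Candidates are ordered from most to least commonly used; the new
		// ShellCompDirectiveKeepOrder asks the shell not to re-sort them.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"production", "staging", "dev"},
				cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	root.AddCommand(deploy)
	_ = root.Execute()
}
```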
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/fish_completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/fish_completions.go
index 97112a17b29a..12ca0d2b11c4 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/fish_completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -53,7 +53,7 @@ function __%[1]s_perform_completion
__%[1]s_debug "last arg: $lastArg"
# Disable ActiveHelp which is not supported for fish shell
- set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
+ set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
__%[1]s_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
@@ -89,6 +89,60 @@ function __%[1]s_perform_completion
printf "%%s\n" "$directiveLine"
end
+# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result
+function __%[1]s_perform_completion_once
+ __%[1]s_debug "Starting __%[1]s_perform_completion_once"
+
+ if test -n "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
+ return 0
+ end
+
+ set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completions, probably due to a failure"
+ return 1
+ end
+
+ __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
+ return 0
+end
+
+# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
+function __%[1]s_clear_perform_completion_once_result
+ __%[1]s_debug ""
+ __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
+ set --erase __%[1]s_perform_completion_once_result
+ __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result"
+end
+
+function __%[1]s_requires_order_preservation
+ __%[1]s_debug ""
+ __%[1]s_debug "========= checking if order preservation is required =========="
+
+ __%[1]s_perform_completion_once
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Error determining if order preservation is required"
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveKeepOrder %[9]d
+ set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
+ __%[1]s_debug "Keeporder is: $keeporder"
+
+ if test $keeporder -ne 0
+ __%[1]s_debug "This does require order preservation"
+ return 0
+ end
+
+ __%[1]s_debug "This doesn't require order preservation"
+ return 1
+end
+
+
# This function does two things:
# - Obtain the completions and store them in the global __%[1]s_comp_results
# - Return false if file completion should be performed
@@ -99,17 +153,17 @@ function __%[1]s_prepare_completions
# Start fresh
set --erase __%[1]s_comp_results
- set -l results (__%[1]s_perform_completion)
- __%[1]s_debug "Completion results: $results"
+ __%[1]s_perform_completion_once
+ __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
- if test -z "$results"
+ if test -z "$__%[1]s_perform_completion_once_result"
__%[1]s_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
return 1
end
- set -l directive (string sub --start 2 $results[-1])
- set --global __%[1]s_comp_results $results[1..-2]
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
__%[1]s_debug "Completions are: $__%[1]s_comp_results"
__%[1]s_debug "Directive is: $directive"
@@ -205,13 +259,17 @@ end
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c %[2]s -e
+# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
+complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
# which provides the program's completion choices.
-complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
-
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# otherwise we use the -k flag
+complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/flag_groups.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/flag_groups.go
index 9c377aaf9c90..b35fde155488 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/flag_groups.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/flag_groups.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/powershell_completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/powershell_completions.go
index 004de42e41e9..177d2755f216 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
$ShellCompDirectiveNoFileComp=%[6]d
$ShellCompDirectiveFilterFileExt=%[7]d
$ShellCompDirectiveFilterDirs=%[8]d
+ $ShellCompDirectiveKeepOrder=%[9]d
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
@@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars {
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "Adding extra empty parameter"
-`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+`
-`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ # PowerShell 7.2+ changed how arguments are passed to executables,
+ # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
+ if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+ ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+ (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+ $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ } else {
+ $RequestComp="$RequestComp" + ' ""'
+ }
}
__%[1]s_debug "Calling $RequestComp"
# First disable ActiveHelp which is not supported for Powershell
- $env:%[9]s=0
+ $env:%[10]s=0
#call the command store the output in $out and redirect stderr and stdout to null
# $Out is an array contains each line per element
@@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
}
$Longest = 0
- $Values = $Out | ForEach-Object {
+ [Array]$Values = $Out | ForEach-Object {
#Split the output in name and description
`+" $Name, $Description = $_.Split(\"`t\",2)"+`
__%[1]s_debug "Name: $Name Description: $Description"
@@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars {
}
}
+ # we sort the values in ascending order by name if keep order isn't passed
+ if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+ $Values = $Values | Sort-Object -Property Name
+ }
+
if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
__%[1]s_debug "ShellCompDirectiveNoFileComp is called"
@@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock
`, name, nameForVar, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
}
func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/projects_using_cobra.md b/cluster-autoscaler/vendor/github.com/spf13/cobra/projects_using_cobra.md
index 6865f88e79a6..8a291eb20e8d 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/projects_using_cobra.md
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -1,11 +1,13 @@
## Projects using Cobra
- [Allero](https://github.com/allero-io/allero)
+- [Arewefastyet](https://benchmark.vitess.io)
- [Arduino CLI](https://github.com/arduino/arduino-cli)
- [Bleve](https://blevesearch.com/)
- [Cilium](https://cilium.io/)
- [CloudQuery](https://github.com/cloudquery/cloudquery)
- [CockroachDB](https://www.cockroachlabs.com/)
+- [Constellation](https://github.com/edgelesssys/constellation)
- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
- [Datree](https://github.com/datreeio/datree)
- [Delve](https://github.com/derekparker/delve)
@@ -25,7 +27,7 @@
- [Istio](https://istio.io)
- [Kool](https://github.com/kool-dev/kool)
- [Kubernetes](https://kubernetes.io/)
-- [Kubescape](https://github.com/armosec/kubescape)
+- [Kubescape](https://github.com/kubescape/kubescape)
- [KubeVirt](https://github.com/kubevirt/kubevirt)
- [Linkerd](https://linkerd.io/)
- [Mattermost-server](https://github.com/mattermost/mattermost-server)
@@ -51,10 +53,12 @@
- [Random](https://github.com/erdaltsksn/random)
- [Rclone](https://rclone.org/)
- [Scaleway CLI](https://github.com/scaleway/scaleway-cli)
+- [Sia](https://github.com/SiaFoundation/siad)
- [Skaffold](https://skaffold.dev/)
- [Tendermint](https://github.com/tendermint/tendermint)
- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli)
+- [Vitess](https://vitess.io)
- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework)
- [Werf](https://werf.io/)
- [ZITADEL](https://github.com/zitadel/zitadel)
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.go
index 126e83c307e0..b035742d3994 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.md b/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.md
index 553ee5df8a7c..065c0621d4c7 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.md
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/shell_completions.md
@@ -71,7 +71,7 @@ PowerShell:
`,cmd.Root().Name()),
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
- Args: cobra.ExactValidArgs(1),
+ Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
Run: func(cmd *cobra.Command, args []string) {
switch args[0] {
case "bash":
@@ -162,16 +162,7 @@ cmd := &cobra.Command{
}
```
-The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
-the completion algorithm if entered manually, e.g. in:
-
-```bash
-$ kubectl get rc [tab][tab]
-backend frontend database
-```
-
-Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
-replication controllers following `rc`.
+The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`.
### Dynamic completion of nouns
@@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt
// return []string{"themes"}, ShellCompDirectiveFilterDirs
//
ShellCompDirectiveFilterDirs
+
+// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+// in which the completions are provided
+ShellCompDirectiveKeepOrder
```
***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
@@ -385,6 +380,19 @@ or
```go
ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
```
+
+If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like:
+
+```bash
+$ source <(helm completion bash)
+$ helm completion [tab][tab]
+bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell)
+fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh)
+
+$ source <(helm completion bash --no-descriptions)
+$ helm completion [tab][tab]
+bash fish powershell zsh
+```
## Bash completions
### Dependencies
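
The doc change above replaces the single-purpose `ExactValidArgs(1)` with the composable `cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)`. For reference, the same pattern on an ordinary command (the `get` command and its nouns are illustrative, not from this repository):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "get [resource]",
		ValidArgs: []string{"pods", "nodes", "services"},
		// Composable validators: exactly one argument, and it must be one of ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("getting", args[0])
		},
	}
	_ = cmd.Execute()
}
```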
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md b/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
index e55367e853f5..85201d840c87 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/user_guide.md
@@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{
}
```
+### Organizing subcommands
+
+A command may have subcommands which in turn may have other subcommands. This is achieved by using
+`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in
+its own go package.
+
+The suggested approach is for the parent command to use `AddCommand` to add its most immediate
+subcommands. For example, consider the following directory structure:
+
+```text
+├── cmd
+│ ├── root.go
+│ └── sub1
+│ ├── sub1.go
+│ └── sub2
+│ ├── leafA.go
+│ ├── leafB.go
+│ └── sub2.go
+└── main.go
+```
+
+In this case:
+
+* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command.
+* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command.
+* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the
+ sub2 command.
+
+This approach ensures the subcommands are always included at compile time while avoiding cyclic
+references.
+
### Returning and handling errors
If you wish to return an error to the caller of a command, `RunE` can be used.
@@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password")
You can also prevent different flags from being provided together if they represent mutually
exclusive options such as specifying an output format as either `--json` or `--yaml` but never both:
```go
-rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON")
-rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML")
+rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON")
+rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML")
rootCmd.MarkFlagsMutuallyExclusive("json", "yaml")
```
@@ -349,7 +380,7 @@ shown below:
```go
var cmd = &cobra.Command{
Short: "hello",
- Args: MatchAll(ExactArgs(2), OnlyValidArgs),
+ Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Hello, World!")
},
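
A self-contained version of the mutually-exclusive-flags snippet touched above, with illustrative variable and command names; passing both --json and --yaml fails flag validation before RunE runs:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var asJSON, asYAML bool

	root := &cobra.Command{
		Use: "report",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("json:", asJSON, "yaml:", asYAML)
			return nil
		},
	}
	root.Flags().BoolVar(&asJSON, "json", false, "Output in JSON")
	root.Flags().BoolVar(&asYAML, "yaml", false, "Output in YAML")
	// Cobra rejects invocations that set both flags at once.
	root.MarkFlagsMutuallyExclusive("json", "yaml")

	_ = root.Execute()
}
```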
diff --git a/cluster-autoscaler/vendor/github.com/spf13/cobra/zsh_completions.go b/cluster-autoscaler/vendor/github.com/spf13/cobra/zsh_completions.go
index 84cec76fde35..1856e4c7f689 100644
--- a/cluster-autoscaler/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/cluster-autoscaler/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
compCmd = ShellCompNoDescRequestCmd
}
WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
+compdef _%[1]s %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
@@ -108,8 +109,9 @@ _%[1]s()
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
- local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
local -a completions
__%[1]s_debug "\n========= starting completion logic =========="
@@ -177,7 +179,7 @@ _%[1]s()
return
fi
- local activeHelpMarker="%[8]s"
+ local activeHelpMarker="%[9]s"
local endIndex=${#activeHelpMarker}
local startIndex=$((${#activeHelpMarker}+1))
local hasActiveHelp=0
@@ -227,6 +229,11 @@ _%[1]s()
noSpace="-S ''"
fi
+ if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+ __%[1]s_debug "Activating keep order."
+ keepOrder="-V"
+ fi
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
@@ -262,7 +269,7 @@ _%[1]s()
return $result
else
__%[1]s_debug "Calling _describe"
- if eval _describe "completions" completions $flagPrefix $noSpace; then
+ if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
__%[1]s_debug "_describe found some completions"
# Return the success of having called _describe
@@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
activeHelpMarker))
}
diff --git a/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/README.md b/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/README.md
index 0e8635d8011e..84a640e71409 100644
--- a/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/README.md
+++ b/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/README.md
@@ -1,5 +1,5 @@
-[![CircleCI](https://circleci.com/gh/stoewer/go-strcase/tree/master.svg?style=svg)](https://circleci.com/gh/stoewer/go-strcase/tree/master)
-[![codecov](https://codecov.io/gh/stoewer/go-strcase/branch/master/graph/badge.svg)](https://codecov.io/gh/stoewer/go-strcase)
+[![GH Actions](https://github.com/stoewer/go-strcase/actions/workflows/lint-test.yml/badge.svg?branch=master)](https://github.com/stoewer/go-strcase/actions)
+[![codecov](https://codecov.io/github/stoewer/go-strcase/branch/master/graph/badge.svg?token=c0UokYnop5)](https://codecov.io/github/stoewer/go-strcase)
[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase)
---
diff --git a/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/camel.go b/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/camel.go
index 5c233cc8f14b..ff9e66e0ce13 100644
--- a/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/camel.go
+++ b/cluster-autoscaler/vendor/github.com/stoewer/go-strcase/camel.go
@@ -27,6 +27,9 @@ func camelCase(s string, upper bool) string {
buffer = append(buffer, toUpper(curr))
} else if isLower(prev) {
buffer = append(buffer, curr)
+ } else if isUpper(prev) && isUpper(curr) && isLower(next) {
+ // Assume a case like "R" for "XRequestId"
+ buffer = append(buffer, curr)
} else {
buffer = append(buffer, toLower(curr))
}
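
For context on the camel.go change above, a small sketch of the behaviour it targets; the input string is illustrative and the expected output assumes the new isUpper(prev)/isUpper(curr)/isLower(next) branch.

    package main

    import (
        "fmt"

        "github.com/stoewer/go-strcase"
    )

    func main() {
        // The "R" that starts a new word inside "XRequestId" now keeps its case,
        // so the expected result is "XRequestId" rather than "XrequestId".
        fmt.Println(strcase.UpperCamelCase("XRequestId"))
    }
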
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go
index fa1245b18973..2924cf3a1492 100644
--- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,7 +8,6 @@ import (
"fmt"
"math"
"os"
- "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -141,12 +140,11 @@ func CallerInfo() []string {
}
parts := strings.Split(file, "/")
- file = parts[len(parts)-1]
if len(parts) > 1 {
+ filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- path, _ := filepath.Abs(file)
- callers = append(callers, fmt.Sprintf("%s:%d", path, line))
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}
@@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
- reflect.Ptr, reflect.Slice},
+ reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)
if isNilableKind && value.IsNil() {
@@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
+ subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}
return true
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}
@@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
+ subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
+ if !av.IsValid() {
+ return true
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go
index f0af8246cfc9..e6ff8dfeb201 100644
--- a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go
@@ -218,16 +218,22 @@ func (c *Call) Unset() *Call {
foundMatchingCall := false
- for i, call := range c.Parent.ExpectedCalls {
+ // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones
+ var index int // write index
+ for _, call := range c.Parent.ExpectedCalls {
if call.Method == c.Method {
_, diffCount := call.Arguments.Diff(c.Arguments)
if diffCount == 0 {
foundMatchingCall = true
- // Remove from ExpectedCalls
- c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...)
+ // Remove from ExpectedCalls - just skip it
+ continue
}
}
+ c.Parent.ExpectedCalls[index] = call
+ index++
}
+ // trim slice up to last copied index
+ c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index]
if !foundMatchingCall {
unlockOnce.Do(c.unlock)
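
A small, hypothetical sketch of the behaviour the rewritten Unset loop preserves; the Service mock is invented for illustration.

    package example

    import "github.com/stretchr/testify/mock"

    type Service struct{ mock.Mock }

    func (s *Service) Do(n int) error { return s.Called(n).Error(0) }

    func demoUnset() {
        s := &Service{}
        call := s.On("Do", 123).Return(nil)
        // Unset still removes the matching expectation; it now compacts
        // ExpectedCalls in place rather than re-slicing while iterating.
        call.Unset()
    }
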
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/interfaces.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/interfaces.go
index 8b98a8af275f..fed037d7f3e6 100644
--- a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/interfaces.go
+++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/interfaces.go
@@ -7,6 +7,7 @@ import "testing"
type TestingSuite interface {
T() *testing.T
SetT(*testing.T)
+ SetS(suite TestingSuite)
}
// SetupAllSuite has a SetupSuite method, which will run before the
@@ -51,3 +52,15 @@ type AfterTest interface {
type WithStats interface {
HandleStats(suiteName string, stats *SuiteInformation)
}
+
+// SetupSubTest has a SetupSubTest method, which will run before each
+// subtest in the suite.
+type SetupSubTest interface {
+ SetupSubTest()
+}
+
+// TearDownSubTest has a TearDownSubTest method, which will run after
+// each subtest in the suite has been run.
+type TearDownSubTest interface {
+ TearDownSubTest()
+}
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go
index 895591878bf7..8b4202d8906d 100644
--- a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go
+++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go
@@ -22,9 +22,13 @@ var matchMethod = flag.String("testify.m", "", "regular expression to select tes
// retrieving the current *testing.T context.
type Suite struct {
*assert.Assertions
+
mu sync.RWMutex
require *require.Assertions
t *testing.T
+
+ // Parent suite to have access to the implemented methods of parent struct
+ s TestingSuite
}
// T retrieves the current *testing.T context.
@@ -43,6 +47,12 @@ func (suite *Suite) SetT(t *testing.T) {
suite.require = require.New(t)
}
+// SetS needs to set the current test suite as parent
+// to get access to the parent methods
+func (suite *Suite) SetS(s TestingSuite) {
+ suite.s = s
+}
+
// Require returns a require context for suite.
func (suite *Suite) Require() *require.Assertions {
suite.mu.Lock()
@@ -85,7 +95,18 @@ func failOnPanic(t *testing.T, r interface{}) {
// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName.
func (suite *Suite) Run(name string, subtest func()) bool {
oldT := suite.T()
- defer suite.SetT(oldT)
+
+ if setupSubTest, ok := suite.s.(SetupSubTest); ok {
+ setupSubTest.SetupSubTest()
+ }
+
+ defer func() {
+ suite.SetT(oldT)
+ if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok {
+ tearDownSubTest.TearDownSubTest()
+ }
+ }()
+
return oldT.Run(name, func(t *testing.T) {
suite.SetT(t)
subtest()
@@ -98,6 +119,7 @@ func Run(t *testing.T, suite TestingSuite) {
defer recoverAndFailOnPanic(t)
suite.SetT(t)
+ suite.SetS(suite)
var suiteSetupDone bool
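
Taken together, SetS and the new SetupSubTest/TearDownSubTest interfaces let a suite hook into every suite.Run subtest. A hedged sketch; the suite and field names are illustrative.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/suite"
    )

    type MySuite struct {
        suite.Suite
        rows []string
    }

    // Discovered through the new SetS plumbing and run around every s.Run subtest.
    func (s *MySuite) SetupSubTest()    { s.rows = nil }
    func (s *MySuite) TearDownSubTest() { s.rows = nil }

    func (s *MySuite) TestThings() {
        s.Run("first", func() { s.rows = append(s.rows, "a") })
        s.Run("second", func() { s.Empty(s.rows) }) // reset by SetupSubTest
    }

    func TestMySuite(t *testing.T) { suite.Run(t, new(MySuite)) }
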
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go
index 16c2c10bc772..f3b389421ef5 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go
@@ -26,7 +26,7 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
- Version = "3.5.5"
+ Version = "3.5.7"
APIVersion = "unknown"
// Git SHA Value will be set during build
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
index e442c3c92e83..d31ece3e24ea 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
@@ -44,16 +44,12 @@ func IsDirWriteable(dir string) error {
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exists. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(dir string) error {
+func TouchDirAll(lg *zap.Logger, dir string) error {
// If path is already a directory, MkdirAll does nothing and returns nil, so,
// first check if dir exist with an expected permission mode.
if Exist(dir) {
err := CheckDirPermission(dir, PrivateDirMode)
if err != nil {
- lg, _ := zap.NewProduction()
- if lg == nil {
- lg = zap.NewExample()
- }
lg.Warn("check file permission", zap.Error(err))
}
} else {
@@ -70,8 +66,8 @@ func TouchDirAll(dir string) error {
// CreateDirAll is similar to TouchDirAll but returns error
// if the deepest directory was not empty.
-func CreateDirAll(dir string) error {
- err := TouchDirAll(dir)
+func CreateDirAll(lg *zap.Logger, dir string) error {
+ err := TouchDirAll(lg, dir)
if err == nil {
var ns []string
ns, err = ReadDir(dir)
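
Callers of fileutil now have to supply the logger themselves. A minimal sketch of an adapted call site (the path and message are illustrative):

    package main

    import (
        "go.etcd.io/etcd/client/pkg/v3/fileutil"
        "go.uber.org/zap"
    )

    func main() {
        lg := zap.NewExample()
        // The caller passes the logger; fileutil no longer constructs one of its
        // own just to report permission problems.
        if err := fileutil.TouchDirAll(lg, "/tmp/etcd-data"); err != nil {
            lg.Fatal("cannot create data directory", zap.Error(err))
        }
    }
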
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
index e8ac0ca6f58a..f4492009d6ce 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
@@ -41,6 +41,12 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
lg = zap.NewNop()
}
errC := make(chan error, 1)
+ lg.Info("started to purge file",
+ zap.String("dir", dirname),
+ zap.String("suffix", suffix),
+ zap.Uint("max", max),
+ zap.Duration("interval", interval))
+
go func() {
if donec != nil {
defer close(donec)
@@ -63,14 +69,16 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
f := filepath.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
+ lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
break
}
if err = os.Remove(f); err != nil {
+ lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
errC <- err
return
}
if err = l.Close(); err != nil {
- lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
errC <- err
return
}
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
index f278a61f8a04..e1f21755d4b7 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
@@ -14,7 +14,10 @@
package tlsutil
-import "crypto/tls"
+import (
+ "crypto/tls"
+ "fmt"
+)
// GetCipherSuite returns the corresponding cipher suite,
// and boolean value if it is supported.
@@ -37,3 +40,17 @@ func GetCipherSuite(s string) (uint16, bool) {
}
return 0, false
}
+
+// GetCipherSuites returns list of corresponding cipher suite IDs.
+func GetCipherSuites(ss []string) ([]uint16, error) {
+ cs := make([]uint16, len(ss))
+ for i, s := range ss {
+ var ok bool
+ cs[i], ok = GetCipherSuite(s)
+ if !ok {
+ return nil, fmt.Errorf("unexpected TLS cipher suite %q", s)
+ }
+ }
+
+ return cs, nil
+}
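
A short sketch of how the new GetCipherSuites helper might be used to turn configured cipher names into IDs; the suite names are standard Go TLS identifiers, not taken from this patch.

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/client/pkg/v3/tlsutil"
    )

    func main() {
        ids, err := tlsutil.GetCipherSuites([]string{
            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
            "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
        })
        if err != nil {
            fmt.Println(err) // e.g. unexpected TLS cipher suite "..."
            return
        }
        fmt.Println(ids)
    }
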
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
index e8f475eb824b..c3bc56a65b59 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
@@ -205,7 +205,7 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
)
return
}
- err = fileutil.TouchDirAll(dirpath)
+ err = fileutil.TouchDirAll(lg, dirpath)
if err != nil {
if info.Logger != nil {
info.Logger.Warn(
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
new file mode 100644
index 000000000000..495c736365e3
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+// +build solaris
+
+package transport
+
+import (
+ "fmt"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return fmt.Errorf("port reuse is not supported on Solaris")
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ })
+}
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
index 432b52e0fcee..e2cc6f482863 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
@@ -1,5 +1,19 @@
-//go:build !windows
-// +build !windows
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !solaris
+// +build !windows,!solaris
package transport
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/maintenance.go
index dbea530e66a2..a98b8ca51e1a 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/maintenance.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/maintenance.go
@@ -92,6 +92,7 @@ func NewMaintenance(c *Client) Maintenance {
err = c.getToken(dctx)
cancel()
if err != nil {
+ conn.Close()
return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err)
}
cancel = func() { conn.Close() }
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
index 04f157a1dcbb..7dc5ddae0fd5 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
@@ -74,13 +74,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
continue
}
if c.shouldRefreshToken(lastErr, callOpts) {
- // clear auth token before refreshing it.
- // call c.Auth.Authenticate with an invalid token will always fail the auth check on the server-side,
- // if the server has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165)
- // and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource.
- c.authTokenBundle.UpdateAuthToken("")
-
- gterr := c.getToken(ctx)
+ gterr := c.refreshToken(ctx)
if gterr != nil {
c.GetLogger().Warn(
"retrying of unary invoker failed to fetch new auth token",
@@ -161,6 +155,24 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
}
+func (c *Client) refreshToken(ctx context.Context) error {
+ if c.authTokenBundle == nil {
+ // c.authTokenBundle will be initialized only when
+ // c.Username != "" && c.Password != "".
+ //
+ // When users use the TLS CommonName based authentication, the
+ // authTokenBundle is always nil. But it's possible for the clients
+ // to get `rpctypes.ErrAuthOldRevision` response when the clients
+ // concurrently modify auth data (e.g, addUser, deleteUser etc.).
+ // In this case, there is no need to refresh the token; instead the
+ // clients just need to retry the operations (e.g. Put, Delete etc).
+ return nil
+ }
+ // clear auth token before refreshing it.
+ c.authTokenBundle.UpdateAuthToken("")
+ return c.getToken(ctx)
+}
+
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
@@ -259,10 +271,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
return true, err
}
if s.client.shouldRefreshToken(err, s.callOpts) {
- // clear auth token to avoid failure when call getToken
- s.client.authTokenBundle.UpdateAuthToken("")
-
- gterr := s.client.getToken(s.ctx)
+ gterr := s.client.refreshToken(s.ctx)
if gterr != nil {
s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
return false, err // return the original error for simplicity
diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go
index b73925ba128a..bc886936c869 100644
--- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go
+++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go
@@ -37,6 +37,13 @@ const (
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
+
+ // AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+ // user-provided ID is available. If passed, an ID will automatically be assigned.
+ AutoWatchID = 0
+
+ // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+ InvalidWatchID = -1
)
type Event mvccpb.Event
@@ -450,7 +457,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
// check watch ID for backward compatibility (<= v3.3)
- if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+ if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel
close(ws.recvc)
@@ -481,7 +488,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
} else if ws.outc != nil {
close(ws.outc)
}
- if ws.id != -1 {
+ if ws.id != InvalidWatchID {
delete(w.substreams, ws.id)
return
}
@@ -533,6 +540,7 @@ func (w *watchGrpcStream) run() {
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
+ backoff := time.Millisecond
for {
select {
// Watch() requested
@@ -543,7 +551,7 @@ func (w *watchGrpcStream) run() {
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
- id: -1,
+ id: InvalidWatchID,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
@@ -649,6 +657,7 @@ func (w *watchGrpcStream) run() {
closeErr = err
return
}
+ backoff = w.backoffIfUnavailable(backoff, err)
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
@@ -669,7 +678,7 @@ func (w *watchGrpcStream) run() {
if len(w.substreams)+len(w.resuming) == 0 {
return
}
- if ws.id != -1 {
+ if ws.id != InvalidWatchID {
// client is closing an established watch; close it on the server proactively instead of waiting
// to close when the next message arrives
cancelSet[ws.id] = struct{}{}
@@ -716,9 +725,9 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
cancelReason: pbresp.CancelReason,
}
- // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
+ // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
// indicate they should be broadcast.
- if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+ if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
return w.broadcastResponse(wr)
}
@@ -873,7 +882,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
w.resumec = make(chan struct{})
w.joinSubstreams()
for _, ws := range w.substreams {
- ws.id = -1
+ ws.id = InvalidWatchID
w.resuming = append(w.resuming, ws)
}
// strip out nils, if any
@@ -963,6 +972,21 @@ func (w *watchGrpcStream) joinSubstreams() {
var maxBackoff = 100 * time.Millisecond
+func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+ if isUnavailableErr(w.ctx, err) {
+ // retry, but backoff
+ if backoff < maxBackoff {
+ // 25% backoff factor
+ backoff = backoff + backoff/4
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ }
+ time.Sleep(backoff)
+ }
+ return backoff
+}
+
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
@@ -983,17 +1007,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
+ backoff = w.backoffIfUnavailable(backoff, err)
}
return ws, nil
}
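
The extracted backoffIfUnavailable keeps the old schedule: 25% growth per unavailable error, capped at maxBackoff. A standalone sketch that reproduces the same arithmetic, for reference:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Mirrors backoffIfUnavailable: start at 1ms, grow by 25%, clamp at 100ms.
        const maxBackoff = 100 * time.Millisecond
        backoff := time.Millisecond
        for i := 0; i < 25; i++ {
            if backoff < maxBackoff {
                backoff += backoff / 4
                if backoff > maxBackoff {
                    backoff = maxBackoff
                }
            }
            fmt.Println(i, backoff)
        }
    }
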
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/Makefile b/cluster-autoscaler/vendor/go.opencensus.io/Makefile
index b3ce3df3032d..d896edc99681 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/Makefile
+++ b/cluster-autoscaler/vendor/go.opencensus.io/Makefile
@@ -91,7 +91,7 @@ embedmd:
.PHONY: install-tools
install-tools:
- go get -u golang.org/x/lint/golint
- go get -u golang.org/x/tools/cmd/cover
- go get -u golang.org/x/tools/cmd/goimports
- go get -u github.com/rakyll/embedmd
+ go install golang.org/x/lint/golint@latest
+ go install golang.org/x/tools/cmd/cover@latest
+ go install golang.org/x/tools/cmd/goimports@latest
+ go install github.com/rakyll/embedmd@latest
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/opencensus.go b/cluster-autoscaler/vendor/go.opencensus.io/opencensus.go
index e5e4b4368c1a..11e31f421c5d 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/opencensus.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/opencensus.go
@@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
- return "0.23.0"
+ return "0.24.0"
}
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/plugin/ochttp/server.go b/cluster-autoscaler/vendor/go.opencensus.io/plugin/ochttp/server.go
index c7ea64235726..f7c8434be06c 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -31,14 +31,14 @@ import (
// Handler is an http.Handler wrapper to instrument your HTTP server with
// OpenCensus. It supports both stats and tracing.
//
-// Tracing
+// # Tracing
//
// This handler is aware of the incoming request's span, reading it from request
// headers as configured using the Propagation field.
// The extracted span can be accessed from the incoming request's
// context.
//
-// span := trace.FromContext(r.Context())
+// span := trace.FromContext(r.Context())
//
// The server span will be automatically ended at the end of ServeHTTP.
type Handler struct {
@@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) {
}
// wrappedResponseWriter returns a wrapped version of the original
-// ResponseWriter and only implements the same combination of additional
+//
+// ResponseWriter and only implements the same combination of additional
+//
// interfaces as the original.
// This implementation is based on https://github.com/felixge/httpsnoop.
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/doc.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/doc.go
index 00d473ee0298..31477a464fd9 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/doc.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/doc.go
@@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording.
OpenCensus allows users to create typed measures, record measurements,
aggregate the collected data, and export the aggregated data.
-Measures
+# Measures
A measure represents a type of data point to be tracked and recorded.
For example, latency, request Mb/s, and response Mb/s are measures
@@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then
create views and collect and break down measures by the tags they are
interested in.
-Recording measurements
+# Recording measurements
Measurement is a data point to be collected for a measure. For example,
for a latency (ms) measure, 100 is a measurement that represents a 100ms
@@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide
on which measurements they want to collect by registering views. This allows
libraries to turn on the instrumentation by default.
-Exemplars
+# Exemplars
For a given recorded measurement, the associated exemplar is a diagnostic map
that gives more information about the measurement.
@@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen
When exported to a supporting back end, you should be able to easily navigate
to example traces that fell into each bucket in the Distribution.
-
*/
package stats // import "go.opencensus.io/stats"
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/internal/record.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/internal/record.go
index 36935e629b66..436dc791f834 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/internal/record.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/internal/record.go
@@ -21,5 +21,11 @@ import (
// DefaultRecorder will be called for each Record call.
var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
+// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but
+// avoids interface{} conversion.
+// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type,
+// but is interface{} here to avoid import loops
+var MeasurementRecorder interface{}
+
// SubscriptionReporter reports when a view subscribed with a measure.
var SubscriptionReporter func(measure string)
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/record.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/record.go
index 2b97283462e2..8b5b99803ce3 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/record.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/record.go
@@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions {
return o
}
+type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})
+
// Record records one or multiple measurements with the same context at once.
// If there are any tags in the context, measurements will be tagged with them.
func Record(ctx context.Context, ms ...Measurement) {
- RecordWithOptions(ctx, WithMeasurements(ms...))
+ // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
+ // (RecordOptions) we can reduce some allocations to speed up this hot path
+ if len(ms) == 0 {
+ return
+ }
+ recorder := internal.MeasurementRecorder.(measurementRecorder)
+ record := false
+ for _, m := range ms {
+ if m.desc.subscribed() {
+ record = true
+ break
+ }
+ }
+ if !record {
+ return
+ }
+ recorder(tag.FromContext(ctx), ms, nil)
+ return
}
// RecordWithTags records one or multiple measurements at once.
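
A minimal sketch of the hot path the reworked Record targets: when no view subscribes to the measure, the call now returns before building any record options. The measure name is illustrative.

    package main

    import (
        "context"

        "go.opencensus.io/stats"
    )

    var latencyMs = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)

    func main() {
        // Behaves like RecordWithOptions(ctx, WithMeasurements(...)), but skips the
        // recordOptions allocation entirely when nothing is subscribed.
        stats.Record(context.Background(), latencyMs.M(12.5))
    }
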
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/aggregation.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/aggregation.go
index 748bd568cda0..61f72d20da33 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/aggregation.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/aggregation.go
@@ -90,9 +90,9 @@ func Sum() *Aggregation {
//
// If len(bounds) >= 2 then the boundaries for bucket index i are:
//
-// [-infinity, bounds[i]) for i = 0
-// [bounds[i-1], bounds[i]) for 0 < i < length
-// [bounds[i-1], +infinity) for i = length
+// [-infinity, bounds[i]) for i = 0
+// [bounds[i-1], bounds[i]) for 0 < i < length
+// [bounds[i-1], +infinity) for i = length
//
// If len(bounds) is 0 then there is no histogram associated with the
// distribution. There will be a single bucket with boundaries
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/collector.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/collector.go
index ac22c93a2b57..bcd6e08c7481 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/collector.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/collector.go
@@ -59,8 +59,15 @@ func (c *collector) clearRows() {
// encodeWithKeys encodes the map by using values
// only associated with the keys provided.
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
+ // Compute the buffer length we will need ahead of time to avoid resizing later
+ reqLen := 0
+ for _, k := range keys {
+ s, _ := m.Value(k)
+ // We will store each key + its length
+ reqLen += len(s) + 1
+ }
vb := &tagencoding.Values{
- Buffer: make([]byte, len(keys)),
+ Buffer: make([]byte, reqLen),
}
for _, k := range keys {
v, _ := m.Value(k)
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/doc.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/doc.go
index 7bbedfe1ff23..60bf0e392540 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/doc.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/doc.go
@@ -34,7 +34,7 @@
// Libraries can define views but it is recommended that in most cases registering
// views be left up to applications.
//
-// Exporting
+// # Exporting
//
// Collected and aggregated data can be exported to a metric collection
// backend by registering its exporter.
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go
index 6e8d18b7f6d3..6a79cd8a34c3 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/stats/view/worker.go
@@ -33,6 +33,7 @@ func init() {
defaultWorker = NewMeter().(*worker)
go defaultWorker.start()
internal.DefaultRecorder = record
+ internal.MeasurementRecorder = recordMeasurement
}
type measureRef struct {
@@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
defaultWorker.Record(tags, ms, attachments)
}
+func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
+ defaultWorker.recordMeasurement(tags, ms, attachments)
+}
+
// Record records a set of measurements ms associated with the given tags and attachments.
func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
+ w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
+}
+
+// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
+// This is the same as Record but without an interface{} type to avoid allocations
+func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
req := &recordReq{
tm: tags,
- ms: ms.([]stats.Measurement),
+ ms: ms,
attachments: attachments,
t: time.Now(),
}
@@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) {
defaultWorker.SetReportingPeriod(d)
}
+// Stop stops the default worker.
+func Stop() {
+ defaultWorker.Stop()
+}
+
// SetReportingPeriod sets the interval between reporting aggregated views in
// the program. If duration is less than or equal to zero, it enables the
// default behavior.
@@ -281,7 +297,7 @@ func (w *worker) start() {
case <-w.quit:
w.timer.Stop()
close(w.c)
- w.done <- true
+ close(w.done)
return
}
}
@@ -290,8 +306,11 @@ func (w *worker) start() {
func (w *worker) Stop() {
prodMgr := metricproducer.GlobalManager()
prodMgr.DeleteProducer(w)
-
- w.quit <- true
+ select {
+ case <-w.quit:
+ default:
+ close(w.quit)
+ }
<-w.done
}
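
With the new package-level Stop and the close-once select in (*worker).Stop, shutting down the default worker is safe to do more than once. A hedged sketch:

    package main

    import (
        "time"

        "go.opencensus.io/stats/view"
    )

    func main() {
        view.SetReportingPeriod(time.Second)
        // Stop closes the quit channel inside a select, so a second call is a no-op
        // instead of a panic or a blocked send.
        view.Stop()
        view.Stop()
    }
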
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_19.go b/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_19.go
index b34d95e34a2c..8fb17226fe3c 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_19.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_19.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.9
// +build go1.9
package tag
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_not19.go b/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_not19.go
index 83adbce56b72..e28cf13cde97 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_not19.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/tag/profile_not19.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !go1.9
// +build !go1.9
package tag
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/trace/doc.go b/cluster-autoscaler/vendor/go.opencensus.io/trace/doc.go
index 04b1ee4f38ea..7a1616a55c5e 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/trace/doc.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/trace/doc.go
@@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io
-
-Exporting Traces
+# Exporting Traces
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
- trace.RegisterExporter(exporter)
+ trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Be careful about using trace.AlwaysSample in a production application with
significant traffic: a new trace will be started and exported for every request.
-Adding Spans to a Trace
+# Adding Spans to a Trace
A trace consists of a tree of spans. In Go, the current span is carried in a
context.Context.
@@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
+ ctx, span := trace.StartSpan(ctx, "example.com/Run")
+ defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/trace/lrumap.go b/cluster-autoscaler/vendor/go.opencensus.io/trace/lrumap.go
index 908c2497ed5b..80095a5f6c03 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/trace/lrumap.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/trace/lrumap.go
@@ -44,7 +44,7 @@ func (lm lruMap) len() int {
}
func (lm lruMap) keys() []interface{} {
- keys := make([]interface{}, len(lm.cacheKeys))
+ keys := make([]interface{}, 0, len(lm.cacheKeys))
for k := range lm.cacheKeys {
keys = append(keys, k)
}
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_go11.go b/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_go11.go
index b7d8aaf28477..b8fc1e495a9c 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_go11.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_go11.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.11
// +build go1.11
package trace
diff --git a/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_nongo11.go b/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_nongo11.go
index e25419859c02..da488fc87401 100644
--- a/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_nongo11.go
+++ b/cluster-autoscaler/vendor/go.opencensus.io/trace/trace_nongo11.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !go1.11
// +build !go1.11
package trace
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
similarity index 63%
rename from cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go
rename to cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index f512cf6e315c..2ae8620fb6bf 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -15,14 +15,13 @@
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
- "context"
-
- "google.golang.org/grpc/metadata"
-
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/baggage"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/global"
+ "go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
@@ -43,6 +42,10 @@ type config struct {
Filter Filter
Propagators propagation.TextMapPropagator
TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+
+ meter metric.Meter
+ rpcServerDuration instrument.Int64Histogram
}
// Option applies an option value for a config.
@@ -55,10 +58,22 @@ func newConfig(opts []Option) *config {
c := &config{
Propagators: otel.GetTextMapPropagator(),
TracerProvider: otel.GetTracerProvider(),
+ MeterProvider: global.MeterProvider(),
}
for _, o := range opts {
o.apply(c)
}
+
+ c.meter = c.MeterProvider.Meter(
+ instrumentationName,
+ metric.WithInstrumentationVersion(SemVersion()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+ var err error
+ if c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration", instrument.WithUnit("ms")); err != nil {
+ otel.Handle(err)
+ }
+
return c
}
@@ -105,59 +120,16 @@ func WithTracerProvider(tp trace.TracerProvider) Option {
return tracerProviderOption{tp: tp}
}
-type metadataSupplier struct {
- metadata *metadata.MD
-}
-
-// assert that metadataSupplier implements the TextMapCarrier interface.
-var _ propagation.TextMapCarrier = &metadataSupplier{}
-
-func (s *metadataSupplier) Get(key string) string {
- values := s.metadata.Get(key)
- if len(values) == 0 {
- return ""
- }
- return values[0]
-}
-
-func (s *metadataSupplier) Set(key string, value string) {
- s.metadata.Set(key, value)
-}
+type meterProviderOption struct{ mp metric.MeterProvider }
-func (s *metadataSupplier) Keys() []string {
- out := make([]string, 0, len(*s.metadata))
- for key := range *s.metadata {
- out = append(out, key)
+func (o meterProviderOption) apply(c *config) {
+ if o.mp != nil {
+ c.MeterProvider = o.mp
}
- return out
-}
-
-// Inject injects correlation context and span context into the gRPC
-// metadata object. This function is meant to be used on outgoing
-// requests.
-func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
- c := newConfig(opts)
- inject(ctx, md, c.Propagators)
}
-func inject(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) {
- propagators.Inject(ctx, &metadataSupplier{
- metadata: md,
- })
-}
-
-// Extract returns the correlation context and span context that
-// another service encoded in the gRPC metadata object with Inject.
-// This function is meant to be used on incoming requests.
-func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
- c := newConfig(opts)
- return extract(ctx, md, c.Propagators)
-}
-
-func extract(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) (baggage.Baggage, trace.SpanContext) {
- ctx = propagators.Extract(ctx, &metadataSupplier{
- metadata: md,
- })
-
- return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
+// WithMeterProvider returns an Option to use the MeterProvider when
+// creating a Meter. If this option is not provided, the global MeterProvider will be used.
+func WithMeterProvider(mp metric.MeterProvider) Option {
+ return meterProviderOption{mp: mp}
}
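
A hedged sketch of wiring the new WithMeterProvider option into a gRPC server; it assumes the metric/global package used by this otelgrpc version, and the server setup itself is illustrative.

    package main

    import (
        "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
        "go.opentelemetry.io/otel/metric/global"
        "google.golang.org/grpc"
    )

    func main() {
        // The interceptor now records an rpc.server.duration histogram through the
        // configured MeterProvider; here the global provider is passed explicitly.
        srv := grpc.NewServer(
            grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor(
                otelgrpc.WithMeterProvider(global.MeterProvider()),
            )),
        )
        defer srv.Stop()
    }
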
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
index 26343dfc16e9..b74d558e3777 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -20,41 +20,36 @@ import (
"context"
"io"
"net"
-
- "github.com/golang/protobuf/proto" // nolint:staticcheck
+ "strconv"
+ "time"
"google.golang.org/grpc"
grpc_codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/baggage"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
type messageType attribute.KeyValue
// Event adds an event of the messageType to the span associated with the
-// passed context with id and size (if message is a proto message).
-func (m messageType) Event(ctx context.Context, id int, message interface{}) {
+// passed context with a message id.
+func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
span := trace.SpanFromContext(ctx)
- if p, ok := message.(proto.Message); ok {
- span.AddEvent("message", trace.WithAttributes(
- attribute.KeyValue(m),
- RPCMessageIDKey.Int(id),
- RPCMessageUncompressedSizeKey.Int(proto.Size(p)),
- ))
- } else {
- span.AddEvent("message", trace.WithAttributes(
- attribute.KeyValue(m),
- RPCMessageIDKey.Int(id),
- ))
+ if !span.IsRecording() {
+ return
}
+ span.AddEvent("message", trace.WithAttributes(
+ attribute.KeyValue(m),
+ RPCMessageIDKey.Int(id),
+ ))
}
var (
@@ -66,6 +61,11 @@ var (
// for use in a grpc.Dial call.
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
cfg := newConfig(opts)
+ tracer := cfg.TracerProvider.Tracer(
+ instrumentationName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
return func(
ctx context.Context,
method string,
@@ -82,14 +82,6 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
return invoker(ctx, method, req, reply, cc, callOpts...)
}
- requestMetadata, _ := metadata.FromOutgoingContext(ctx)
- metadataCopy := requestMetadata.Copy()
-
- tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
-
name, attr := spanInfo(method, cc.Target())
var span trace.Span
ctx, span = tracer.Start(
@@ -100,8 +92,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
)
defer span.End()
- inject(ctx, &metadataCopy, cfg.Propagators)
- ctx = metadata.NewOutgoingContext(ctx, metadataCopy)
+ ctx = inject(ctx, cfg.Propagators)
messageSent.Event(ctx, 1, req)
@@ -245,6 +236,11 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
// for use in a grpc.Dial call.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
cfg := newConfig(opts)
+ tracer := cfg.TracerProvider.Tracer(
+ instrumentationName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
return func(
ctx context.Context,
desc *grpc.StreamDesc,
@@ -261,14 +257,6 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
return streamer(ctx, desc, cc, method, callOpts...)
}
- requestMetadata, _ := metadata.FromOutgoingContext(ctx)
- metadataCopy := requestMetadata.Copy()
-
- tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
-
name, attr := spanInfo(method, cc.Target())
var span trace.Span
ctx, span = tracer.Start(
@@ -278,8 +266,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
trace.WithAttributes(attr...),
)
- inject(ctx, &metadataCopy, cfg.Propagators)
- ctx = metadata.NewOutgoingContext(ctx, metadataCopy)
+ ctx = inject(ctx, cfg.Propagators)
s, err := streamer(ctx, desc, cc, method, callOpts...)
if err != nil {
@@ -313,6 +300,11 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
// for use in a grpc.NewServer call.
func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
cfg := newConfig(opts)
+ tracer := cfg.TracerProvider.Tracer(
+ instrumentationName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
return func(
ctx context.Context,
req interface{},
@@ -327,20 +319,11 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
return handler(ctx, req)
}
- requestMetadata, _ := metadata.FromIncomingContext(ctx)
- metadataCopy := requestMetadata.Copy()
-
- bags, spanCtx := Extract(ctx, &metadataCopy, opts...)
- ctx = baggage.ContextWithBaggage(ctx, bags)
-
- tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
+ ctx = extract(ctx, cfg.Propagators)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
ctx, span := tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, spanCtx),
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
@@ -349,13 +332,22 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
messageReceived.Event(ctx, 1, req)
+ var statusCode grpc_codes.Code
+ defer func(t time.Time) {
+ elapsedTime := time.Since(t) / time.Millisecond
+ attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode)))
+ cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), attr...)
+ }(time.Now())
+
resp, err := handler(ctx, req)
if err != nil {
s, _ := status.FromError(err)
+ statusCode = s.Code()
span.SetStatus(codes.Error, s.Message())
span.SetAttributes(statusCodeAttr(s.Code()))
messageSent.Event(ctx, 1, s.Proto())
} else {
+ statusCode = grpc_codes.OK
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
messageSent.Event(ctx, 1, resp)
}
@@ -409,6 +401,11 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream {
// for use in a grpc.NewServer call.
func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
cfg := newConfig(opts)
+ tracer := cfg.TracerProvider.Tracer(
+ instrumentationName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
return func(
srv interface{},
ss grpc.ServerStream,
@@ -424,20 +421,11 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
return handler(srv, wrapServerStream(ctx, ss))
}
- requestMetadata, _ := metadata.FromIncomingContext(ctx)
- metadataCopy := requestMetadata.Copy()
-
- bags, spanCtx := Extract(ctx, &metadataCopy, opts...)
- ctx = baggage.ContextWithBaggage(ctx, bags)
-
- tracer := cfg.TracerProvider.Tracer(
- instrumentationName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
+ ctx = extract(ctx, cfg.Propagators)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
ctx, span := tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, spanCtx),
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...),
@@ -445,7 +433,6 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
defer span.End()
err := handler(srv, wrapServerStream(ctx, ss))
-
if err != nil {
s, _ := status.FromError(err)
span.SetStatus(codes.Error, s.Message())
@@ -470,7 +457,7 @@ func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) {
// peerAttr returns attributes about the peer address.
func peerAttr(addr string) []attribute.KeyValue {
- host, port, err := net.SplitHostPort(addr)
+ host, p, err := net.SplitHostPort(addr)
if err != nil {
return []attribute.KeyValue(nil)
}
@@ -478,11 +465,25 @@ func peerAttr(addr string) []attribute.KeyValue {
if host == "" {
host = "127.0.0.1"
}
+ port, err := strconv.Atoi(p)
+ if err != nil {
+ return []attribute.KeyValue(nil)
+ }
- return []attribute.KeyValue{
- semconv.NetPeerIPKey.String(host),
- semconv.NetPeerPortKey.String(port),
+ var attr []attribute.KeyValue
+ if ip := net.ParseIP(host); ip != nil {
+ attr = []attribute.KeyValue{
+ semconv.NetSockPeerAddr(host),
+ semconv.NetSockPeerPort(port),
+ }
+ } else {
+ attr = []attribute.KeyValue{
+ semconv.NetPeerName(host),
+ semconv.NetPeerPort(port),
+ }
}
+
+ return attr
}
// peerFromCtx returns a peer address from a context, if one exists.
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
index bc214d363a24..c40f87c4f902 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -18,7 +18,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
// ParseFullMethod returns a span name following the OpenTelemetry semantic
@@ -34,10 +34,10 @@ func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
var attrs []attribute.KeyValue
if service := parts[0]; service != "" {
- attrs = append(attrs, semconv.RPCServiceKey.String(service))
+ attrs = append(attrs, semconv.RPCService(service))
}
if method := parts[1]; method != "" {
- attrs = append(attrs, semconv.RPCMethodKey.String(method))
+ attrs = append(attrs, semconv.RPCMethod(method))
}
return name, attrs
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
new file mode 100644
index 000000000000..d91c6df2370e
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+
+ "go.opentelemetry.io/otel/baggage"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type metadataSupplier struct {
+ metadata *metadata.MD
+}
+
+// assert that metadataSupplier implements the TextMapCarrier interface.
+var _ propagation.TextMapCarrier = &metadataSupplier{}
+
+func (s *metadataSupplier) Get(key string) string {
+ values := s.metadata.Get(key)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+func (s *metadataSupplier) Set(key string, value string) {
+ s.metadata.Set(key, value)
+}
+
+func (s *metadataSupplier) Keys() []string {
+ out := make([]string, 0, len(*s.metadata))
+ for key := range *s.metadata {
+ out = append(out, key)
+ }
+ return out
+}
+
+// Inject injects correlation context and span context into the gRPC
+// metadata object. This function is meant to be used on outgoing
+// requests.
+// Deprecated: Unnecessary public func.
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
+ c := newConfig(opts)
+ c.Propagators.Inject(ctx, &metadataSupplier{
+ metadata: md,
+ })
+}
+
+func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+ propagators.Inject(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+ return metadata.NewOutgoingContext(ctx, md)
+}
+
+// Extract returns the correlation context and span context that
+// another service encoded in the gRPC metadata object with Inject.
+// This function is meant to be used on incoming requests.
+// Deprecated: Unnecessary public func.
+func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
+ c := newConfig(opts)
+ ctx = c.Propagators.Extract(ctx, &metadataSupplier{
+ metadata: md,
+ })
+
+ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
+}
+
+func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+
+ return propagators.Extract(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+}
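The carrier above lets the configured propagators read and write trace context (and baggage) through gRPC metadata. A hedged usage sketch of the exported, now-deprecated helpers, assuming the default behaviour of falling back to the global propagator:

```go
package main

import (
	"context"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Register a W3C trace-context propagator globally; otelgrpc uses the
	// global propagator when no WithPropagators option is supplied.
	otel.SetTextMapPropagator(propagation.TraceContext{})

	ctx := context.Background()
	md := metadata.MD{}

	// Inject writes the current span context into md (outgoing side);
	// Extract reads it back (incoming side). The interceptors do this
	// automatically via the unexported equivalents, hence the deprecation.
	otelgrpc.Inject(ctx, &md)
	bag, spanCtx := otelgrpc.Extract(ctx, &md)
	_, _ = bag, spanCtx
}
```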
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
index 611c7f3017a7..b65fab308f3c 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
import (
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
// Semantic conventions for attribute keys for gRPC.
@@ -41,7 +41,7 @@ const (
// Semantic conventions for common RPC attributes.
var (
// Semantic convention for gRPC as the remoting system.
- RPCSystemGRPC = semconv.RPCSystemKey.String("grpc")
+ RPCSystemGRPC = semconv.RPCSystemGRPC
// Semantic convention for a message named message.
RPCNameMessage = RPCNameKey.String("message")
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
index bf6b2aa1c119..78cac03ed1db 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// Version is the current release version of the gRPC instrumentation.
func Version() string {
- return "0.35.0"
+ return "0.40.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index d0337f3a5e45..0c3e48d55832 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -33,6 +33,7 @@ const (
// config represents the configuration options available for the http.Handler
// and http.Transport types.
type config struct {
+ ServerName string
Tracer trace.Tracer
Meter metric.Meter
Propagators propagation.TextMapPropagator
@@ -198,3 +199,11 @@ func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
c.ClientTrace = f
})
}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+ return optionFunc(func(c *config) {
+ c.ServerName = server
+ })
+}
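`WithServerName` flows into the handler as `config.ServerName` and ends up on server spans via the `net.host.name` attribute (see the handler changes below). A minimal usage sketch; the server name and listen address are placeholder values:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Wrap the mux so each request produces a server span; WithServerName
	// records the (virtual) server handling the requests.
	handler := otelhttp.NewHandler(mux, "http.server",
		otelhttp.WithServerName("my-service"))

	_ = http.ListenAndServe(":8080", handler)
}
```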
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index 4c037f1d8e08..d92aecc0a3d9 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -24,10 +24,10 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
+ "go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
"go.opentelemetry.io/otel/trace"
)
@@ -39,6 +39,7 @@ var _ http.Handler = &Handler{}
// to the span using the attribute.Keys defined in this package.
type Handler struct {
operation string
+ server string
handler http.Handler
tracer trace.Tracer
@@ -49,8 +50,8 @@ type Handler struct {
writeEvent bool
filters []Filter
spanNameFormatter func(string, *http.Request) string
- counters map[string]syncint64.Counter
- valueRecorders map[string]syncfloat64.Histogram
+ counters map[string]instrument.Int64Counter
+ valueRecorders map[string]instrument.Float64Histogram
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
}
@@ -90,6 +91,7 @@ func (h *Handler) configure(c *config) {
h.spanNameFormatter = c.SpanNameFormatter
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
}
func handleErr(err error) {
@@ -99,16 +101,16 @@ func handleErr(err error) {
}
func (h *Handler) createMeasures() {
- h.counters = make(map[string]syncint64.Counter)
- h.valueRecorders = make(map[string]syncfloat64.Histogram)
+ h.counters = make(map[string]instrument.Int64Counter)
+ h.valueRecorders = make(map[string]instrument.Float64Histogram)
- requestBytesCounter, err := h.meter.SyncInt64().Counter(RequestContentLength)
+ requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength)
handleErr(err)
- responseBytesCounter, err := h.meter.SyncInt64().Counter(ResponseContentLength)
+ responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength)
handleErr(err)
- serverLatencyMeasure, err := h.meter.SyncFloat64().Histogram(ServerLatency)
+ serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency)
handleErr(err)
h.counters[RequestContentLength] = requestBytesCounter
@@ -128,7 +130,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
- opts := h.spanStartOptions
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(httpconv.ServerRequest(h.server, r)...),
+ }
+ if h.server != "" {
+ hostAttr := semconv.NetHostName(h.server)
+ opts = append(opts, trace.WithAttributes(hostAttr))
+ }
+ opts = append(opts, h.spanStartOptions...)
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
opts = append(opts, trace.WithNewRoot())
// Linking incoming span context if any for public endpoint.
@@ -137,12 +146,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- opts = append([]trace.SpanStartOption{
- trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
- trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...),
- trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...),
- }, opts...) // start with the configured options
-
tracer := h.tracer
if tracer == nil {
@@ -164,10 +167,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
var bw bodyWrapper
- // if request body is nil we don't want to mutate the body as it will affect
- // the identity of it in an unforeseeable way because we assert ReadCloser
- // fulfills a certain interface and it is indeed nil.
- if r.Body != nil {
+ // if request body is nil or NoBody, we don't want to mutate the body as it
+ // will affect the identity of it in an unforeseeable way because we assert
+ // ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body
bw.record = readRecordFunc
r.Body = &bw
@@ -180,7 +183,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators}
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@@ -206,7 +215,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
// Add metrics
- attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...)
+ attributes := append(labeler.Get(), httpconv.ServerRequest(h.server, r)...)
+ if rww.statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+ }
h.counters[RequestContentLength].Add(ctx, bw.read, attributes...)
h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...)
@@ -231,9 +243,10 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int,
attributes = append(attributes, WroteBytesKey.Int64(wrote))
}
if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer))
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
}
+ span.SetStatus(httpconv.ServerStatus(statusCode))
+
if werr != nil && werr != io.EOF {
attributes = append(attributes, WriteErrorKey.String(werr.Error()))
}
@@ -245,7 +258,7 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int,
func WithRouteTag(route string, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span := trace.SpanFromContext(r.Context())
- span.SetAttributes(semconv.HTTPRouteKey.String(route))
+ span.SetAttributes(semconv.HTTPRoute(route))
h.ServeHTTP(w, r)
})
}
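`WithRouteTag` (last hunk above) now sets the typed `http.route` attribute. A short sketch of pairing it with a mux so spans carry the low-cardinality route pattern rather than the raw path; the route and address are placeholders:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()

	// Tag spans created by the surrounding otelhttp.Handler with the route
	// pattern instead of the concrete request path.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			_, _ = w.Write([]byte("ok"))
		})))

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "http.server"))
}
```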
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index fd5e1e9bc759..9dda7e1a9571 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
"go.opentelemetry.io/otel/trace"
)
@@ -110,7 +110,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
}
r = r.WithContext(ctx)
- span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...)
+ span.SetAttributes(httpconv.ClientRequest(r)...)
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r)
@@ -121,8 +121,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
return res, err
}
- span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...)
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode))
+ span.SetAttributes(httpconv.ClientResponse(res)...)
+ span.SetStatus(httpconv.ClientStatus(res.StatusCode))
res.Body = newWrappedBody(span, res.Body)
return res, err
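On the client side the same `httpconv` helpers are applied by the transport. A hedged sketch of wrapping a client with it; the target URL is a placeholder:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the default transport so each outgoing request gets a client span
	// annotated with httpconv.ClientRequest/ClientResponse attributes.
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}

	resp, err := client.Get("https://example.com/")
	if err == nil {
		_ = resp.Body.Close()
	}
}
```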
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index 56e4736062a2..8a55855b9c7c 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.35.0"
+ return "0.40.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.golangci.yml b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.golangci.yml
index 253e3b35b520..0f099f575959 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -9,7 +9,6 @@ linters:
disable-all: true
# Specifically enable linters we want to use.
enable:
- - deadcode
- depguard
- errcheck
- godot
@@ -21,10 +20,8 @@ linters:
- misspell
- revive
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
issues:
# Maximum issues count per one linter.
@@ -114,8 +111,9 @@ linters-settings:
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
+ # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- name: context-as-argument
- disabled: false
+ disabled: true
arguments:
allowTypesBefore: "*testing.T"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.lycheeignore b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.lycheeignore
index 545d634525d9..40d62fa2eb83 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -1,3 +1,6 @@
http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 906e17ce94fe..1d9726f60b6f 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,401 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeNameKey` -> `OTelScopeNameKey`
+ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+ - `OtelStatusCodeError` -> `OTelStatusCodeError`
+ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeName` -> `OTelScopeName`
+ - `OtelScopeVersion` -> `OTelScopeVersion`
+ - `OtelLibraryName` -> `OTelLibraryName`
+ - `OtelLibraryVersion` -> `OTelLibraryVersion`
+ - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+ This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+ Use the equivalent unit string instead. (#3776)
+ - Use `"1"` instead of `unit.Dimensionless`
+ - Use `"By"` instead of `unit.Bytes`
+ - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+ These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+ This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+ These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+ - `Float64Counter` replaces the `syncfloat64.Counter`
+ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+ - `Float64Histogram` replaces the `syncfloat64.Histogram`
+ - `Int64Counter` replaces the `syncint64.Counter`
+ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+ - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+ This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+ This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+ - The named `Callback` replaces the inline function parameter. (#3564)
+ - `Callback` is required to return an error. (#3576)
+ - `Callback` accepts the added `Observer` parameter.
+ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+ Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+ Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+ Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+ This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
+ This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+ - `OTEL_EXPORTER_OTLP_INSECURE`
+ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+ - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` helper allows instrument authors to test that returned datapoints have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Reenabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+ By default, it will register with the default Prometheus registerer.
+ A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+ It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+ This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+ Instead the exporter is defined as an "unchecked" collector for Prometheus.
+ This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+ This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+ This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+ Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the sdk. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+ Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18; this removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+ A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+ A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
## [1.10.0] - 2022-09-09
### Added
@@ -191,7 +586,7 @@ Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be mod
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_LINK_COUNT_LIMIT`
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
-
+
If the provided environment variables are invalid (negative), the default values would be used.
- Rename the `gc` runtime name to `go` (#2560)
- Add resource container ID detection. (#2418)
@@ -1907,7 +2302,16 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
@@ -1959,3 +2363,7 @@ It contains api and sdk for trace and meter.
[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 9371a481ab12..a6928bfdff80 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -508,7 +508,7 @@ Approvers:
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
Maintainers:
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/Makefile b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/Makefile
index 18ffaa33a99c..0e6ffa284e1c 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/Makefile
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/Makefile
@@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go
TIMEOUT = 60
@@ -156,7 +156,7 @@ go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
- && $(GO) mod tidy -compat=1.17
+ && $(GO) mod tidy -compat=1.18
.PHONY: lint-modules
lint-modules: go-mod-tidy
@@ -208,11 +208,12 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
- @[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
- @[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- @$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
+ [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease
prerelease: | $(MULTIMOD)
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/README.md b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/README.md
index 4aeecb8bfe72..878d87e58b91 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/README.md
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/README.md
@@ -50,21 +50,21 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
+| Ubuntu | 1.20 | amd64 |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.18 | amd64 |
-| Ubuntu | 1.17 | amd64 |
+| Ubuntu | 1.20 | 386 |
| Ubuntu | 1.19 | 386 |
| Ubuntu | 1.18 | 386 |
-| Ubuntu | 1.17 | 386 |
+| MacOS | 1.20 | amd64 |
| MacOS | 1.19 | amd64 |
| MacOS | 1.18 | amd64 |
-| MacOS | 1.17 | amd64 |
+| Windows | 1.20 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.18 | amd64 |
-| Windows | 1.17 | amd64 |
+| Windows | 1.20 | 386 |
| Windows | 1.19 | 386 |
| Windows | 1.18 | 386 |
-| Windows | 1.17 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/RELEASING.md b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/RELEASING.md
index 71e576254795..77d56c936515 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -6,20 +6,25 @@ New versions of the [OpenTelemetry specification] mean new versions of the `semc
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
-2. Run the `make semconv-generate ...` target from this repository.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
-export TAG="v1.7.0" # Change to the release version you are generating.
+export TAG="v1.13.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
-git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG"
+git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG"
+docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
+**Note**, the generation code was changed to generate versions >= 1.13.
+To generate versions prior to this, check out the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
+
## Pre-Release
First, decide which module sets will be released and update their versions
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/attribute/value.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/attribute/value.go
index 57899f682e7a..cb21dd5c0961 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
+ "reflect"
"strconv"
"go.opentelemetry.io/otel/internal"
+ "go.opentelemetry.io/otel/internal/attribute"
)
//go:generate stringer -type=Type
@@ -66,12 +68,7 @@ func BoolValue(v bool) Value {
// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
- cp := make([]bool, len(v))
- copy(cp, v)
- return Value{
- vtype: BOOLSLICE,
- slice: &cp,
- }
+ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
}
// IntValue creates an INT64 Value.
@@ -81,13 +78,14 @@ func IntValue(v int) Value {
// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
- cp := make([]int64, 0, len(v))
- for _, i := range v {
- cp = append(cp, int64(i))
+ var int64Val int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+ for i, val := range v {
+ cp.Elem().Index(i).SetInt(int64(val))
}
return Value{
vtype: INT64SLICE,
- slice: &cp,
+ slice: cp.Elem().Interface(),
}
}
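Copying the elements into a fixed-size array (built with `reflect.ArrayOf`) instead of holding a `*[]int64` makes the stored value comparable by content rather than by pointer identity. A small sketch of the observable effect, assuming the public `attribute` constructors:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Two KeyValues built from distinct but element-wise equal slices.
	a := attribute.IntSlice("ids", []int{1, 2, 3})
	b := attribute.IntSlice("ids", []int{1, 2, 3})

	// With the array-backed representation they compare equal by value,
	// not by the identity of the underlying slice.
	fmt.Println(a == b) // true
}
```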
@@ -101,12 +99,7 @@ func Int64Value(v int64) Value {
// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
- cp := make([]int64, len(v))
- copy(cp, v)
- return Value{
- vtype: INT64SLICE,
- slice: &cp,
- }
+ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
}
// Float64Value creates a FLOAT64 Value.
@@ -119,12 +112,7 @@ func Float64Value(v float64) Value {
// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
- cp := make([]float64, len(v))
- copy(cp, v)
- return Value{
- vtype: FLOAT64SLICE,
- slice: &cp,
- }
+ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
}
// StringValue creates a STRING Value.
@@ -137,12 +125,7 @@ func StringValue(v string) Value {
// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
- cp := make([]string, len(v))
- copy(cp, v)
- return Value{
- vtype: STRINGSLICE,
- slice: &cp,
- }
+ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
}
// Type returns a type of the Value.
@@ -159,10 +142,14 @@ func (v Value) AsBool() bool {
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
- if s, ok := v.slice.(*[]bool); ok {
- return *s
+ if v.vtype != BOOLSLICE {
+ return nil
}
- return nil
+ return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+ return attribute.AsBoolSlice(v.slice)
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
@@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 {
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
- if s, ok := v.slice.(*[]int64); ok {
- return *s
+ if v.vtype != INT64SLICE {
+ return nil
}
- return nil
+ return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+ return attribute.AsInt64Slice(v.slice)
}
// AsFloat64 returns the float64 value. Make sure that the Value's
@@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 {
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
- if s, ok := v.slice.(*[]float64); ok {
- return *s
+ if v.vtype != FLOAT64SLICE {
+ return nil
}
- return nil
+ return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+ return attribute.AsFloat64Slice(v.slice)
}
// AsString returns the string value. Make sure that the Value's type
@@ -204,10 +199,14 @@ func (v Value) AsString() string {
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
- if s, ok := v.slice.(*[]string); ok {
- return *s
+ if v.vtype != STRINGSLICE {
+ return nil
}
- return nil
+ return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
+ return attribute.AsStringSlice(v.slice)
}
type unknownValueType struct{}
@@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} {
case BOOL:
return v.AsBool()
case BOOLSLICE:
- return v.AsBoolSlice()
+ return v.asBoolSlice()
case INT64:
return v.AsInt64()
case INT64SLICE:
- return v.AsInt64Slice()
+ return v.asInt64Slice()
case FLOAT64:
return v.AsFloat64()
case FLOAT64SLICE:
- return v.AsFloat64Slice()
+ return v.asFloat64Slice()
case STRING:
return v.stringly
case STRINGSLICE:
- return v.AsStringSlice()
+ return v.asStringSlice()
}
return unknownValueType{}
}
@@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} {
func (v Value) Emit() string {
switch v.Type() {
case BOOLSLICE:
- return fmt.Sprint(*(v.slice.(*[]bool)))
+ return fmt.Sprint(v.asBoolSlice())
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(*(v.slice.(*[]int64)))
+ return fmt.Sprint(v.asInt64Slice())
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(*(v.slice.(*[]float64)))
+ return fmt.Sprint(v.asFloat64Slice())
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(*(v.slice.(*[]string)))
+ return fmt.Sprint(v.asStringSlice())
case STRING:
return v.stringly
default:
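The hunk above swaps the `*[]T` slice payloads for fixed-size arrays built through the new `internal/attribute` helpers. A minimal sketch, not part of the patch, of why that matters: arrays are comparable in Go, so two `Value`s built from equal slices now compare equal, which keeps `attribute.Set` deduplication and map keys working for slice-valued attributes.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Two KeyValues built from distinct but equal slices.
	a := attribute.StringSlice("letters", []string{"x", "y"})
	b := attribute.StringSlice("letters", []string{"x", "y"})

	// With the slice payload stored as a comparable array instead of a
	// *[]string, plain == on the Values is expected to report true here;
	// the old pointer representation compared pointer identity instead.
	fmt.Println(a.Value == b.Value)
}
```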
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index eba180e04f88..a36db8f8d85d 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -250,8 +250,9 @@ type Member struct {
hasData bool
}
-// NewMember returns a new Member from the passed arguments. An error is
-// returned if the created Member would be invalid according to the W3C
+// NewMember returns a new Member from the passed arguments. The key will be
+// used directly while the value will be URL decoded after validation. An error
+// is returned if the created Member would be invalid according to the W3C
// Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) {
m := Member{
@@ -263,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
-
+ decodedValue, err := url.QueryUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ m.value = decodedValue
return m, nil
}
@@ -328,8 +333,9 @@ func parseMember(member string) (Member, error) {
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
-// validate ensures m conforms to the W3C Baggage specification, returning an
-// error otherwise.
+// validate ensures m conforms to the W3C Baggage specification.
+// A key must be an ASCII string, and a value must be URL-encoded UTF-8;
+// an error is returned otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
@@ -465,6 +471,7 @@ func (b Baggage) Member(key string) Member {
key: key,
value: v.Value,
properties: fromInternalProperties(v.Properties),
+ hasData: true,
}
}
@@ -484,6 +491,7 @@ func (b Baggage) Members() []Member {
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
+ hasData: true,
})
}
return members
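A short usage sketch, not part of the patch, of the behavior the `NewMember` hunk introduces: the value argument is validated as a percent-encoded baggage value and then stored URL-decoded, and members returned by `Member`/`Members` now carry `hasData`, so they serialize instead of rendering as empty.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// The value must be a valid percent-encoded baggage value; after
	// validation it is stored URL-decoded.
	m, err := baggage.NewMember("userId", "alice%20smith")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Value()) // expected: "alice smith"

	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	// Looking the member back up returns one with hasData set, so its
	// string form round-trips.
	fmt.Println(b.Member("userId").Value())
}
```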
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/codes/codes.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/codes/codes.go
index 064a9279fd14..587ebae4e30e 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -23,10 +23,20 @@ import (
const (
// Unset is the default status code.
Unset Code = 0
+
// Error indicates the operation contains an error.
+ //
+ // NOTE: The error code in OTLP is 2.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Error Code = 1
+
// Ok indicates operation has been validated by an Application developers
// or Operator to have completed successfully, or contain no error.
+ //
+ // NOTE: The Ok code in OTLP is 1.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Ok Code = 2
maxCode = 3
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
index 67003c4a2fa9..53ff3126b6e9 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
@@ -23,6 +23,8 @@ import (
"strconv"
"strings"
"time"
+
+ "go.opentelemetry.io/otel/internal/global"
)
// ConfigFn is the generic function used to set a config.
@@ -59,13 +61,26 @@ func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
}
}
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
- if d, err := strconv.Atoi(v); err == nil {
- fn(time.Duration(d) * time.Millisecond)
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
}
+ fn(time.Duration(d) * time.Millisecond)
}
}
}
@@ -83,23 +98,59 @@ func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader)
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
- if u, err := url.Parse(v); err == nil {
- fn(u)
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
}
+ fn(u)
}
}
}
-// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config.
-func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) {
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
- if b, err := e.ReadFile(v); err == nil {
- if c, err := createTLSConfig(b); err == nil {
- fn(c)
- }
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
}
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
}
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
}
}
@@ -117,15 +168,18 @@ func stringToHeader(value string) map[string]string {
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
+ global.Error(errors.New("missing '="), "parse headers", "input", nameValue)
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
+ global.Error(err, "escape header key", "key", nameValue[0])
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
+ global.Error(err, "escape header value", "value", nameValue[1])
continue
}
trimmedValue := strings.TrimSpace(value)
@@ -136,13 +190,10 @@ func stringToHeader(value string) map[string]string {
return headers
}
-func createTLSConfig(certBytes []byte) (*tls.Config, error) {
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
-
- return &tls.Config{
- RootCAs: cp,
- }, nil
+ return cp, nil
}
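The `envconfig` package is internal to the exporter and cannot be imported directly, so the standalone sketch below only mirrors the `ConfigFn` pattern that the new `WithBool`, `WithCertPool`, and `WithClientCert` helpers follow: each option reads one environment variable and invokes its callback only when the value parses. The `OTEL_EXPORTER_OTLP_TRACES_INSECURE` variable is used purely as an illustrative input.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// configFn mirrors envconfig.ConfigFn: an option that inspects the environment
// and calls back only when a usable value is present.
type configFn func(lookup func(string) (string, bool))

// withBool mirrors the new envconfig.WithBool helper: "true" (case-insensitive)
// becomes true, anything else false, and a missing variable is a no-op.
func withBool(name string, fn func(bool)) configFn {
	return func(lookup func(string) (string, bool)) {
		if v, ok := lookup(name); ok {
			fn(strings.ToLower(v) == "true")
		}
	}
}

func main() {
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_INSECURE", "true")

	insecure := false
	opts := []configFn{
		withBool("OTEL_EXPORTER_OTLP_TRACES_INSECURE", func(b bool) { insecure = b }),
	}
	for _, apply := range opts {
		apply(os.LookupEnv)
	}
	fmt.Println(insecure) // true
}
```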
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
new file mode 100644
index 000000000000..9aa62ed9e8e9
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal contains common functionality for all OTLP exporters.
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+import "go.opentelemetry.io/otel"
+
+// GetUserAgentHeader returns an OTLP header value of the form "OTel OTLP Exporter Go/{{ .Version }}".
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#user-agent
+func GetUserAgentHeader() string {
+ return "OTel OTLP Exporter Go/" + otel.Version()
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
index 7994706ab514..9ab89b375747 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
@@ -16,19 +16,6 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import "fmt"
-// PartialSuccessDropKind indicates the kind of partial success error
-// received by an OTLP exporter, which corresponds with the signal
-// being exported.
-type PartialSuccessDropKind string
-
-const (
- // TracingPartialSuccess indicates that some spans were rejected.
- TracingPartialSuccess PartialSuccessDropKind = "spans"
-
- // MetricsPartialSuccess indicates that some metric data points were rejected.
- MetricsPartialSuccess PartialSuccessDropKind = "metric data points"
-)
-
// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
@@ -36,7 +23,7 @@ const (
type PartialSuccess struct {
ErrorMessage string
RejectedItems int64
- RejectedKind PartialSuccessDropKind
+ RejectedKind string
}
var _ error = PartialSuccess{}
@@ -56,13 +43,22 @@ func (ps PartialSuccess) Is(err error) bool {
return ok
}
-// PartialSuccessToError produces an error suitable for passing to
-// `otel.Handle()` out of the fields in a partial success response,
-// independent of which signal produced the outcome.
-func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error {
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
return PartialSuccess{
ErrorMessage: errorMessage,
RejectedItems: itemsRejected,
- RejectedKind: kind,
+ RejectedKind: "metric data points",
}
}
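`PartialSuccess` lives in an internal package, so the sketch below (not part of the patch) mirrors its value-receiver `Is` method to show why `errors.Is(err, PartialSuccess{})` matches any partial-success error, regardless of whether the new `TracePartialSuccessError` or `MetricPartialSuccessError` constructor produced it.

```go
package main

import (
	"errors"
	"fmt"
)

// partialSuccess mirrors the vendored PartialSuccess error: a value type whose
// Is method matches on type, so errors.Is with a zero-value target succeeds
// regardless of the field values.
type partialSuccess struct {
	RejectedItems int64
	RejectedKind  string
}

func (ps partialSuccess) Error() string {
	return fmt.Sprintf("OTLP partial success: %d %s rejected", ps.RejectedItems, ps.RejectedKind)
}

func (ps partialSuccess) Is(err error) bool {
	_, ok := err.(partialSuccess)
	return ok
}

func main() {
	// The exporter would hand an error like this to the global error handler.
	err := fmt.Errorf("traces export: %w", partialSuccess{RejectedItems: 3, RejectedKind: "spans"})
	fmt.Println(errors.Is(err, partialSuccess{})) // true
}
```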
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
index 3d43f7aea97d..7e1b0055a7f8 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
@@ -76,21 +76,21 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
}
}
- // Do not use NewExponentialBackOff since it calls Reset and the code here
- // must call Reset after changing the InitialInterval (this saves an
- // unnecessary call to Now).
- b := &backoff.ExponentialBackOff{
- InitialInterval: c.InitialInterval,
- RandomizationFactor: backoff.DefaultRandomizationFactor,
- Multiplier: backoff.DefaultMultiplier,
- MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
- }
- b.Reset()
-
return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
for {
err := fn(ctx)
if err == nil {
@@ -119,8 +119,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
delay = throttle
}
- if err := waitFunc(ctx, delay); err != nil {
- return err
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
}
}
}
@@ -129,6 +129,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
// Allow override for testing.
var waitFunc = wait
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
func wait(ctx context.Context, delay time.Duration) error {
timer := time.NewTimer(delay)
defer timer.Stop()
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
new file mode 100644
index 000000000000..217751da552f
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+// ErrorKind is used to identify the kind of export error
+// being wrapped.
+type ErrorKind int
+
+const (
+ // TracesExport indicates the error comes from the OTLP trace exporter.
+ TracesExport ErrorKind = iota
+)
+
+// prefix returns a prefix for the Error() string.
+func (k ErrorKind) prefix() string {
+ switch k {
+ case TracesExport:
+ return "traces export: "
+ default:
+ return "unknown: "
+ }
+}
+
+// wrappedExportError wraps an OTLP exporter error with the kind of
+// signal that produced it.
+type wrappedExportError struct {
+ wrap error
+ kind ErrorKind
+}
+
+// WrapTracesError wraps an error from the OTLP exporter for traces.
+func WrapTracesError(err error) error {
+ return wrappedExportError{
+ wrap: err,
+ kind: TracesExport,
+ }
+}
+
+var _ error = wrappedExportError{}
+
+// Error attaches a prefix corresponding to the kind of exporter.
+func (t wrappedExportError) Error() string {
+ return t.kind.prefix() + t.wrap.Error()
+}
+
+// Unwrap returns the wrapped error.
+func (t wrappedExportError) Unwrap() error {
+ return t.wrap
+}
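With `WrapTracesError` in place, export failures reach the global error handler prefixed by their signal. A minimal sketch, not part of the patch, of installing a handler that would observe those errors; `logHandler` is an illustrative name.

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel"
)

// logHandler is a tiny otel.ErrorHandler; with the wrappederror change, OTLP
// trace export failures arrive prefixed with "traces export: ".
type logHandler struct{}

func (logHandler) Handle(err error) { log.Printf("otel: %v", err) }

func main() {
	otel.SetErrorHandler(logHandler{})
	// From here on, errors the OTLP trace exporter reports via otel.Handle
	// carry the prefix added by WrapTracesError.
}
```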
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
index ca91fd4f489c..6e9cc0366ba2 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
## Examples
-- [Exporter setup and examples](./otlptracehttp/example_test.go)
-- [Full example sending telemetry to a local collector](../../../example/otel-collector)
+- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go)
+- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector)
## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
index c5ee6c098cca..b65802edbbdf 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -19,6 +19,7 @@ import (
"errors"
"sync"
+ "go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
@@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan)
return nil
}
- return e.client.UploadTraces(ctx, protoSpans)
+ err := e.client.UploadTraces(ctx, protoSpans)
+ if err != nil {
+ return internal.WrapTracesError(err)
+ }
+ return nil
}
// Start establishes a connection to the receiving endpoint.
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
index b29f618e3de7..62c5029db2ac 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
@@ -16,6 +16,7 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/
import (
"crypto/tls"
+ "crypto/x509"
"net/url"
"os"
"path"
@@ -53,6 +54,7 @@ func ApplyHTTPEnvConfigs(cfg Config) Config {
func getOptionsFromEnv() []GenericOption {
opts := []GenericOption{}
+ tlsConf := &tls.Config{}
DefaultEnvOptionsReader.Apply(
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
@@ -81,8 +83,13 @@ func getOptionsFromEnv() []GenericOption {
return cfg
}, withEndpointForGRPC(u)))
}),
- envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
- envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
@@ -125,3 +132,19 @@ func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOpt
}
}
}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
index 56e83b85334e..c48ffd530814 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
@@ -97,6 +97,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())},
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts {
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 9d6e1898b14d..fe23f8e3766f 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -202,11 +202,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
ResourceSpans: protoSpans,
})
if resp != nil && resp.PartialSuccess != nil {
- otel.Handle(internal.PartialSuccessToError(
- internal.TracingPartialSuccess,
- resp.PartialSuccess.RejectedSpans,
- resp.PartialSuccess.ErrorMessage,
- ))
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedSpans()
+ if n != 0 || msg != "" {
+ err := internal.TracePartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
}
// nil is converted to OK.
if status.Code(err) == codes.OK {
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/handler.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/handler.go
index 36cf09f72908..ecd363ab5165 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/handler.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/handler.go
@@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel"
import (
"log"
"os"
- "sync"
+ "sync/atomic"
+ "unsafe"
)
var (
@@ -34,28 +35,26 @@ var (
)
type delegator struct {
- lock *sync.RWMutex
- eh ErrorHandler
+ delegate unsafe.Pointer
}
func (d *delegator) Handle(err error) {
- d.lock.RLock()
- defer d.lock.RUnlock()
- d.eh.Handle(err)
+ d.getDelegate().Handle(err)
+}
+
+func (d *delegator) getDelegate() ErrorHandler {
+ return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
}
// setDelegate sets the ErrorHandler delegate.
func (d *delegator) setDelegate(eh ErrorHandler) {
- d.lock.Lock()
- defer d.lock.Unlock()
- d.eh = eh
+ atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
}
func defaultErrorHandler() *delegator {
- return &delegator{
- lock: &sync.RWMutex{},
- eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)},
- }
+ d := &delegator{}
+ d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+ return d
}
// errLogger logs errors if no delegate is set, otherwise they are delegated.
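A compact sketch, not part of the patch, of the lock-free pattern the hunk above adopts: the active handler is published with an atomic pointer store and read with an atomic load, replacing the `sync.RWMutex` around a struct field.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type handler interface{ Handle(error) }

type printHandler struct{ prefix string }

func (p printHandler) Handle(err error) { fmt.Println(p.prefix, err) }

// delegator mirrors the vendored change: the delegate is swapped with an
// atomic pointer store instead of guarding a field with a mutex.
type delegator struct{ delegate unsafe.Pointer }

func (d *delegator) set(h handler) {
	atomic.StorePointer(&d.delegate, unsafe.Pointer(&h))
}

func (d *delegator) Handle(err error) {
	(*(*handler)(atomic.LoadPointer(&d.delegate))).Handle(err)
}

func main() {
	d := &delegator{}
	d.set(printHandler{prefix: "default:"})
	d.Handle(fmt.Errorf("boom"))

	// Swapping the delegate is a single atomic store; readers never block.
	d.set(printHandler{prefix: "custom:"})
	d.Handle(fmt.Errorf("boom again"))
}
```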
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 000000000000..622c3ee3f276
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for commonly used
+attribute-processing logic.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+ "reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+ var zero bool
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+ return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+ var zero int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+ return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+ var zero float64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+ return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+ var zero string
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+ return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero bool
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero int64
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero float64
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero string
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
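A standalone sketch (the package above is internal and not importable) of the slice-to-array round trip these helpers perform with `reflect.ArrayOf`: the array form is comparable, and converting back always hands the caller a fresh slice, so the stored value cannot be mutated after the fact.

```go
package main

import (
	"fmt"
	"reflect"
)

// toArray copies a string slice into a fixed-size array value, the trick the
// new internal/attribute helpers use so attribute payloads stay comparable.
func toArray(v []string) interface{} {
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf("")))
	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
	return cp.Elem().Interface()
}

// toSlice converts such an array value back into a freshly allocated slice.
func toSlice(v interface{}) []string {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Array {
		return nil
	}
	out := reflect.New(reflect.ArrayOf(rv.Len(), reflect.TypeOf("")))
	reflect.Copy(out.Elem(), rv)
	return out.Elem().Slice(0, rv.Len()).Interface().([]string)
}

func main() {
	a := toArray([]string{"x", "y"})
	b := toArray([]string{"x", "y"})
	fmt.Println(a == b)     // true: arrays compare element-wise
	fmt.Println(toSlice(a)) // [x y]
}
```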
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index ccb3258711a0..293c08961fbc 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"os"
- "sync"
+ "sync/atomic"
+ "unsafe"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
@@ -27,37 +28,36 @@ import (
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
-var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
-var globalLoggerLock = &sync.RWMutex{}
+var globalLogger unsafe.Pointer
+
+func init() {
+ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
// SetLogger overrides the globalLogger with l.
//
// To see Info messages use a logger with `l.V(1).Enabled() == true`
// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
func SetLogger(l logr.Logger) {
- globalLoggerLock.Lock()
- defer globalLoggerLock.Unlock()
- globalLogger = l
+ atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+}
+
+func getLogger() logr.Logger {
+ return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less then 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.V(1).Info(msg, keysAndValues...)
+ getLogger().V(1).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.Error(err, msg, keysAndValues...)
+ getLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
- globalLoggerLock.RLock()
- defer globalLoggerLock.RUnlock()
- globalLogger.V(5).Info(msg, keysAndValues...)
+ getLogger().V(5).Info(msg, keysAndValues...)
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/config.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/config.go
index 621e4c5fcb83..778ad2d748b5 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -14,17 +14,30 @@
package metric // import "go.opentelemetry.io/otel/metric"
+import "go.opentelemetry.io/otel/attribute"
+
// MeterConfig contains options for Meters.
type MeterConfig struct {
instrumentationVersion string
schemaURL string
+ attrs attribute.Set
+
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
}
-// InstrumentationVersion is the version of the library providing instrumentation.
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
func (cfg MeterConfig) InstrumentationVersion() string {
return cfg.instrumentationVersion
}
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+ return cfg.attrs
+}
+
// SchemaURL is the schema_url of the library providing instrumentation.
func (cfg MeterConfig) SchemaURL() string {
return cfg.schemaURL
@@ -60,6 +73,16 @@ func WithInstrumentationVersion(version string) MeterOption {
})
}
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
// WithSchemaURL sets the schema URL.
func WithSchemaURL(schemaURL string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
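A brief usage sketch, not part of the patch, of the new `WithInstrumentationAttributes` option; the scope name, version, and attribute below are illustrative values, not ones taken from this repository's instrumentation.

```go
package main

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	// The option de-duplicates the passed attributes into an attribute.Set
	// stored on the MeterConfig describing this instrumentation scope.
	_ = global.MeterProvider().Meter(
		"example.com/instrumentation/scope",
		metric.WithInstrumentationVersion("0.1.0"),
		metric.WithInstrumentationAttributes(attribute.String("component", "example")),
	)
}
```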
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/global/global.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/global/global.go
index 05a67c2e9995..cb0896d38acf 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/global/global.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/global/global.go
@@ -30,7 +30,7 @@ func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter
return MeterProvider().Meter(instrumentationName, opts...)
}
-// MeterProvider returns the registered global trace provider.
+// MeterProvider returns the registered global meter provider.
// If none is registered then a No-op MeterProvider is returned.
func MeterProvider() metric.MeterProvider {
return global.MeterProvider()
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
new file mode 100644
index 000000000000..0b5d5a99c0f6
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Observable interface {
+ Asynchronous
+
+ float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableCounter interface{ Float64Observable }
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableUpDownCounter interface{ Float64Observable }
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableGauge interface{ Float64Observable }
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Observer interface {
+ Observe(value float64, attributes ...attribute.KeyValue)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObserverConfig contains options for Asynchronous instruments that
+// observe float64 values.
+type Float64ObserverConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObserverConfig returns a new Float64ObserverConfig with all opts
+// applied.
+func NewFloat64ObserverConfig(opts ...Float64ObserverOption) Float64ObserverConfig {
+ var config Float64ObserverConfig
+ for _, o := range opts {
+ config = o.applyFloat64Observer(config)
+ }
+ return config
+}
+
+// Description returns the Config description.
+func (c Float64ObserverConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the Config unit.
+func (c Float64ObserverConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the Config callbacks.
+func (c Float64ObserverConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObserverOption applies options to float64 Observer instruments.
+type Float64ObserverOption interface {
+ applyFloat64Observer(Float64ObserverConfig) Float64ObserverConfig
+}
+
+type float64ObserverOptionFunc func(Float64ObserverConfig) Float64ObserverConfig
+
+func (fn float64ObserverOptionFunc) applyFloat64Observer(cfg Float64ObserverConfig) Float64ObserverConfig {
+ return fn(cfg)
+}
+
+// WithFloat64Callback adds callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObserverOption {
+ return float64ObserverOptionFunc(func(cfg Float64ObserverConfig) Float64ObserverConfig {
+ cfg.callbacks = append(cfg.callbacks, callback)
+ return cfg
+ })
+}
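A short sketch, not part of the patch, of how the new callback-based observer configuration composes; `readHeapBytes` is a hypothetical measurement source, and in a real program the config would be consumed by an SDK `Meter` rather than printed.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	// A callback the SDK would invoke once per collection cycle; the
	// hypothetical readHeapBytes stands in for a real measurement source.
	readHeapBytes := func() float64 { return 42e6 }
	cb := func(_ context.Context, o instrument.Float64Observer) error {
		o.Observe(readHeapBytes(), attribute.String("pool", "heap"))
		return nil
	}

	cfg := instrument.NewFloat64ObserverConfig(
		instrument.WithDescription("heap size"),
		instrument.WithUnit("By"),
		instrument.WithFloat64Callback(cb),
	)
	fmt.Println(cfg.Description(), cfg.Unit(), len(cfg.Callbacks()))
}
```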
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
deleted file mode 100644
index 370715f694ca..000000000000
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
-
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
-
- // Gauge creates an instrument for recording the current value.
- Gauge(name string, opts ...instrument.Option) (Gauge, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// Gauge is an instrument that records independent readings.
-type Gauge interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
new file mode 100644
index 000000000000..05feeacb0538
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Observable interface {
+ Asynchronous
+
+ int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableCounter interface{ Int64Observable }
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableUpDownCounter interface{ Int64Observable }
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableGauge interface{ Int64Observable }
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Observer interface {
+ Observe(value int64, attributes ...attribute.KeyValue)
+}
+
+// Int64Callback is a function registered with a Meter that makes
+// observations for an Int64Observable instrument it is registered with.
+// Calls to the Int64Observer record measurement values for the
+// Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObserverConfig contains options for Asynchronous instruments that
+// observe int64 values.
+type Int64ObserverConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObserverConfig returns a new Int64ObserverConfig with all opts
+// applied.
+func NewInt64ObserverConfig(opts ...Int64ObserverOption) Int64ObserverConfig {
+ var config Int64ObserverConfig
+ for _, o := range opts {
+ config = o.applyInt64Observer(config)
+ }
+ return config
+}
+
+// Description returns the Config description.
+func (c Int64ObserverConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the Config unit.
+func (c Int64ObserverConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the Config callbacks.
+func (c Int64ObserverConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObserverOption applies options to int64 Observer instruments.
+type Int64ObserverOption interface {
+ applyInt64Observer(Int64ObserverConfig) Int64ObserverConfig
+}
+
+type int64ObserverOptionFunc func(Int64ObserverConfig) Int64ObserverConfig
+
+func (fn int64ObserverOptionFunc) applyInt64Observer(cfg Int64ObserverConfig) Int64ObserverConfig {
+ return fn(cfg)
+}
+
+// WithInt64Callback adds callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObserverOption {
+ return int64ObserverOptionFunc(func(cfg Int64ObserverConfig) Int64ObserverConfig {
+ cfg.callbacks = append(cfg.callbacks, callback)
+ return cfg
+ })
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
deleted file mode 100644
index 41a561bc4a2c..000000000000
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
-
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
-
- // Gauge creates an instrument for recording the current value.
- Gauge(name string, opts ...instrument.Option) (Gauge, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// Gauge is an instrument that records independent readings.
-type Gauge interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/config.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/config.go
deleted file mode 100644
index 8778bce16197..000000000000
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package instrument // import "go.opentelemetry.io/otel/metric/instrument"
-
-import "go.opentelemetry.io/otel/metric/unit"
-
-// Config contains options for metric instrument descriptors.
-type Config struct {
- description string
- unit unit.Unit
-}
-
-// Description describes the instrument in human-readable terms.
-func (cfg Config) Description() string {
- return cfg.description
-}
-
-// Unit describes the measurement unit for an instrument.
-func (cfg Config) Unit() unit.Unit {
- return cfg.unit
-}
-
-// Option is an interface for applying metric instrument options.
-type Option interface {
- applyInstrument(Config) Config
-}
-
-// NewConfig creates a new Config and applies all the given options.
-func NewConfig(opts ...Option) Config {
- var config Config
- for _, o := range opts {
- config = o.applyInstrument(config)
- }
- return config
-}
-
-type optionFunc func(Config) Config
-
-func (fn optionFunc) applyInstrument(cfg Config) Config {
- return fn(cfg)
-}
-
-// WithDescription applies provided description.
-func WithDescription(desc string) Option {
- return optionFunc(func(cfg Config) Config {
- cfg.description = desc
- return cfg
- })
-}
-
-// WithUnit applies provided unit.
-func WithUnit(u unit.Unit) Option {
- return optionFunc(func(cfg Config) Config {
- cfg.unit = u
- return cfg
- })
-}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
index e1bbb850d76d..f6dd9e890f4b 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
@@ -28,3 +28,61 @@ type Asynchronous interface {
type Synchronous interface {
synchronous()
}
+
+// Option applies options to all instruments.
+type Option interface {
+ Float64ObserverOption
+ Int64ObserverOption
+ Float64Option
+ Int64Option
+}
+
+type descOpt string
+
+func (o descOpt) applyFloat64(c Float64Config) Float64Config {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64(c Int64Config) Int64Config {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig {
+ c.description = string(o)
+ return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) Option { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64(c Float64Config) Float64Config {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64(c Int64Config) Int64Config {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig {
+ c.unit = string(o)
+ return c
+}
+
+// WithUnit sets the instrument unit.
+func WithUnit(u string) Option { return unitOpt(u) }
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
new file mode 100644
index 000000000000..2cdfeb2691af
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Counter interface {
+ // Add records a change to the counter.
+ Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64UpDownCounter interface {
+ // Add records a change to the counter.
+ Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Histogram interface {
+ // Record adds an additional value to the distribution.
+ Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Float64Config contains options for Synchronous instruments that record
+// float64 values.
+type Float64Config struct {
+ description string
+ unit string
+}
+
+// NewFloat64Config returns a new Float64Config with all opts
+// applied.
+func NewFloat64Config(opts ...Float64Option) Float64Config {
+ var config Float64Config
+ for _, o := range opts {
+ config = o.applyFloat64(config)
+ }
+ return config
+}
+
+// Description returns the Config description.
+func (c Float64Config) Description() string {
+ return c.description
+}
+
+// Unit returns the Config unit.
+func (c Float64Config) Unit() string {
+ return c.unit
+}
+
+// Float64Option applies options to synchronous float64 instruments.
+type Float64Option interface {
+ applyFloat64(Float64Config) Float64Config
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
deleted file mode 100644
index 435db1127bc1..000000000000
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
- // Histogram creates an instrument for recording a distribution of values.
- Histogram(name string, opts ...instrument.Option) (Histogram, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// Histogram is an instrument that records a distribution of values.
-type Histogram interface {
- // Record adds an additional value to the distribution.
- Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
new file mode 100644
index 000000000000..e212c6d695f7
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Counter interface {
+ // Add records a change to the counter.
+ Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64UpDownCounter interface {
+ // Add records a change to the counter.
+ Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Histogram interface {
+ // Record adds an additional value to the distribution.
+ Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+ Synchronous
+}
+
+// Int64Config contains options for Synchronous instruments that record int64
+// values.
+type Int64Config struct {
+ description string
+ unit string
+}
+
+// NewInt64Config returns a new Int64Config with all opts
+// applied.
+func NewInt64Config(opts ...Int64Option) Int64Config {
+ var config Int64Config
+ for _, o := range opts {
+ config = o.applyInt64(config)
+ }
+ return config
+}
+
+// Description returns the Config description.
+func (c Int64Config) Description() string {
+ return c.description
+}
+
+// Unit returns the Config unit.
+func (c Int64Config) Unit() string {
+ return c.unit
+}
+
+// Int64Option applies options to synchronous int64 instruments.
+type Int64Option interface {
+ applyInt64(Int64Config) Int64Config
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
deleted file mode 100644
index c77a46728609..000000000000
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
- // Histogram creates an instrument for recording a distribution of values.
- Histogram(name string, opts ...instrument.Option) (Histogram, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// Histogram is an instrument that records a distribution of values.
-type Histogram interface {
- // Record adds an additional value to the distribution.
- Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
index aed8b6660a57..d1480fa5f3ed 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
@@ -22,23 +22,27 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
)
+// unwrapper unwraps to return the underlying instrument implementation.
+type unwrapper interface {
+ Unwrap() instrument.Asynchronous
+}
+
type afCounter struct {
- name string
- opts []instrument.Option
+ instrument.Float64Observable
- delegate atomic.Value //asyncfloat64.Counter
+ name string
+ opts []instrument.Float64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Float64ObservableCounter
}
+var _ unwrapper = (*afCounter)(nil)
+var _ instrument.Float64ObservableCounter = (*afCounter)(nil)
+
func (i *afCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...)
+ ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -46,30 +50,27 @@ func (i *afCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
+func (i *afCounter) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.Counter)
+ return ctr.(instrument.Float64ObservableCounter)
}
return nil
}
type afUpDownCounter struct {
- name string
- opts []instrument.Option
+ instrument.Float64Observable
- delegate atomic.Value //asyncfloat64.UpDownCounter
+ name string
+ opts []instrument.Float64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Float64ObservableUpDownCounter
}
+var _ unwrapper = (*afUpDownCounter)(nil)
+var _ instrument.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...)
+ ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -77,30 +78,27 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
+func (i *afUpDownCounter) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afUpDownCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.UpDownCounter)
+ return ctr.(instrument.Float64ObservableUpDownCounter)
}
return nil
}
type afGauge struct {
- name string
- opts []instrument.Option
+ instrument.Float64Observable
- delegate atomic.Value //asyncfloat64.Gauge
+ name string
+ opts []instrument.Float64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Float64ObservableGauge
}
+var _ unwrapper = (*afGauge)(nil)
+var _ instrument.Float64ObservableGauge = (*afGauge)(nil)
+
func (i *afGauge) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...)
+ ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -108,30 +106,27 @@ func (i *afGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
+func (i *afGauge) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afGauge) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.Gauge)
+ return ctr.(instrument.Float64ObservableGauge)
}
return nil
}
type aiCounter struct {
- name string
- opts []instrument.Option
+ instrument.Int64Observable
- delegate atomic.Value //asyncint64.Counter
+ name string
+ opts []instrument.Int64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Int64ObservableCounter
}
+var _ unwrapper = (*aiCounter)(nil)
+var _ instrument.Int64ObservableCounter = (*aiCounter)(nil)
+
func (i *aiCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().Counter(i.name, i.opts...)
+ ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -139,30 +134,27 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
+func (i *aiCounter) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.Counter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.Counter)
+ return ctr.(instrument.Int64ObservableCounter)
}
return nil
}
type aiUpDownCounter struct {
- name string
- opts []instrument.Option
+ instrument.Int64Observable
- delegate atomic.Value //asyncint64.UpDownCounter
+ name string
+ opts []instrument.Int64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Int64ObservableUpDownCounter
}
+var _ unwrapper = (*aiUpDownCounter)(nil)
+var _ instrument.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...)
+ ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -170,30 +162,27 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiUpDownCounter) unwrap() instrument.Asynchronous {
+func (i *aiUpDownCounter) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.UpDownCounter)
+ return ctr.(instrument.Int64ObservableUpDownCounter)
}
return nil
}
type aiGauge struct {
- name string
- opts []instrument.Option
+ instrument.Int64Observable
- delegate atomic.Value //asyncint64.Gauge
+ name string
+ opts []instrument.Int64ObserverOption
- instrument.Asynchronous
+ delegate atomic.Value //instrument.Int64ObservableGauge
}
+var _ unwrapper = (*aiGauge)(nil)
+var _ instrument.Int64ObservableGauge = (*aiGauge)(nil)
+
func (i *aiGauge) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...)
+ ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -201,31 +190,27 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiGauge) unwrap() instrument.Asynchronous {
+func (i *aiGauge) Unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.Gauge)
+ return ctr.(instrument.Int64ObservableGauge)
}
return nil
}
-//Sync Instruments.
+// Sync Instruments.
type sfCounter struct {
name string
- opts []instrument.Option
+ opts []instrument.Float64Option
- delegate atomic.Value //syncfloat64.Counter
+ delegate atomic.Value //instrument.Float64Counter
instrument.Synchronous
}
+var _ instrument.Float64Counter = (*sfCounter)(nil)
+
func (i *sfCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().Counter(i.name, i.opts...)
+ ctr, err := m.Float64Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -235,21 +220,23 @@ func (i *sfCounter) setDelegate(m metric.Meter) {
func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...)
+ ctr.(instrument.Float64Counter).Add(ctx, incr, attrs...)
}
}
type sfUpDownCounter struct {
name string
- opts []instrument.Option
+ opts []instrument.Float64Option
- delegate atomic.Value //syncfloat64.UpDownCounter
+ delegate atomic.Value //instrument.Float64UpDownCounter
instrument.Synchronous
}
+var _ instrument.Float64UpDownCounter = (*sfUpDownCounter)(nil)
+
func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...)
+ ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -259,21 +246,23 @@ func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...)
+ ctr.(instrument.Float64UpDownCounter).Add(ctx, incr, attrs...)
}
}
type sfHistogram struct {
name string
- opts []instrument.Option
+ opts []instrument.Float64Option
- delegate atomic.Value //syncfloat64.Histogram
+ delegate atomic.Value //instrument.Float64Histogram
instrument.Synchronous
}
+var _ instrument.Float64Histogram = (*sfHistogram)(nil)
+
func (i *sfHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...)
+ ctr, err := m.Float64Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -283,21 +272,23 @@ func (i *sfHistogram) setDelegate(m metric.Meter) {
func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...)
+ ctr.(instrument.Float64Histogram).Record(ctx, x, attrs...)
}
}
type siCounter struct {
name string
- opts []instrument.Option
+ opts []instrument.Int64Option
- delegate atomic.Value //syncint64.Counter
+ delegate atomic.Value //instrument.Int64Counter
instrument.Synchronous
}
+var _ instrument.Int64Counter = (*siCounter)(nil)
+
func (i *siCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().Counter(i.name, i.opts...)
+ ctr, err := m.Int64Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -307,21 +298,23 @@ func (i *siCounter) setDelegate(m metric.Meter) {
func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.Counter).Add(ctx, x, attrs...)
+ ctr.(instrument.Int64Counter).Add(ctx, x, attrs...)
}
}
type siUpDownCounter struct {
name string
- opts []instrument.Option
+ opts []instrument.Int64Option
- delegate atomic.Value //syncint64.UpDownCounter
+ delegate atomic.Value //instrument.Int64UpDownCounter
instrument.Synchronous
}
+var _ instrument.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
func (i *siUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...)
+ ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -331,21 +324,23 @@ func (i *siUpDownCounter) setDelegate(m metric.Meter) {
func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...)
+ ctr.(instrument.Int64UpDownCounter).Add(ctx, x, attrs...)
}
}
type siHistogram struct {
name string
- opts []instrument.Option
+ opts []instrument.Int64Option
- delegate atomic.Value //syncint64.Histogram
+ delegate atomic.Value //instrument.Int64Histogram
instrument.Synchronous
}
+var _ instrument.Int64Histogram = (*siHistogram)(nil)
+
func (i *siHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().Histogram(i.name, i.opts...)
+ ctr, err := m.Int64Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
@@ -355,6 +350,6 @@ func (i *siHistogram) setDelegate(m metric.Meter) {
func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.Histogram).Record(ctx, x, attrs...)
+ ctr.(instrument.Int64Histogram).Record(ctx, x, attrs...)
}
}
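The wrappers above all follow the same lazy-delegation pattern: the instrument is handed out before any SDK is installed, and calls forward to the real implementation once setDelegate stores it in an atomic.Value. A small standalone sketch of that pattern (toy types, not the library's code):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// realCounter stands in for a concrete SDK instrument.
type realCounter struct{ total int64 }

func (c *realCounter) Add(v int64) { c.total += v }

// lazyCounter mirrors the delegate pattern: usable before configuration,
// forwarding to the real instrument once one is installed.
type lazyCounter struct {
	delegate atomic.Value // *realCounter
}

func (l *lazyCounter) setDelegate(c *realCounter) { l.delegate.Store(c) }

func (l *lazyCounter) Add(v int64) {
	if d := l.delegate.Load(); d != nil {
		d.(*realCounter).Add(v)
	}
	// Measurements made before a delegate exists are dropped, as in the
	// global package.
}

func main() {
	lc := &lazyCounter{}
	lc.Add(1) // no delegate yet: dropped

	rc := &realCounter{}
	lc.setDelegate(rc)
	lc.Add(2)
	fmt.Println(rc.total) // 2
}
```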
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
index 0fa924f397c8..8acf632863cb 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
@@ -15,17 +15,13 @@
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
- "context"
+ "container/list"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// meterProvider is a placeholder for a configured SDK MeterProvider.
@@ -109,7 +105,8 @@ type meter struct {
mtx sync.Mutex
instruments []delegatedInstrument
- callbacks []delegatedCallback
+
+ registry list.List
delegate atomic.Value // metric.Meter
}
@@ -135,213 +132,223 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
inst.setDelegate(meter)
}
- for _, callback := range m.callbacks {
- callback.setDelegate(meter)
+ for e := m.registry.Front(); e != nil; e = e.Next() {
+ r := e.Value.(*registration)
+ r.setDelegate(meter)
+ m.registry.Remove(e)
}
m.instruments = nil
- m.callbacks = nil
+ m.registry.Init()
}
-// AsyncInt64 is the namespace for the Asynchronous Integer instruments.
-//
-// To Observe data with instruments it must be registered in a callback.
-func (m *meter) AsyncInt64() asyncint64.InstrumentProvider {
+func (m *meter) Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.AsyncInt64()
+ return del.Int64Counter(name, options...)
}
- return (*aiInstProvider)(m)
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &siCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// AsyncFloat64 is the namespace for the Asynchronous Float instruments.
-//
-// To Observe data with instruments it must be registered in a callback.
-func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
+func (m *meter) Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.AsyncFloat64()
+ return del.Int64UpDownCounter(name, options...)
}
- return (*afInstProvider)(m)
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &siUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// RegisterCallback captures the function that will be called during Collect.
-//
-// It is only valid to call Observe within the scope of the passed function,
-// and only on the instruments that were registered with this call.
-func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error {
+func (m *meter) Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
- insts = unwrapInstruments(insts)
- return del.RegisterCallback(insts, function)
+ return del.Int64Histogram(name, options...)
}
-
m.mtx.Lock()
defer m.mtx.Unlock()
- m.callbacks = append(m.callbacks, delegatedCallback{
- instruments: insts,
- function: function,
- })
-
- return nil
+ i := &siHistogram{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-type wrapped interface {
- unwrap() instrument.Asynchronous
+func (m *meter) Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Int64ObservableCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &aiCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
- out := make([]instrument.Asynchronous, 0, len(instruments))
-
- for _, inst := range instruments {
- if in, ok := inst.(wrapped); ok {
- out = append(out, in.unwrap())
- } else {
- out = append(out, inst)
- }
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Int64ObservableUpDownCounter(name, options...)
}
-
- return out
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &aiUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// SyncInt64 is the namespace for the Synchronous Integer instruments.
-func (m *meter) SyncInt64() syncint64.InstrumentProvider {
+func (m *meter) Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.SyncInt64()
+ return del.Int64ObservableGauge(name, options...)
}
- return (*siInstProvider)(m)
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &aiGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// SyncFloat64 is the namespace for the Synchronous Float instruments.
-func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider {
+func (m *meter) Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.SyncFloat64()
+ return del.Float64Counter(name, options...)
}
- return (*sfInstProvider)(m)
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-type delegatedCallback struct {
- instruments []instrument.Asynchronous
- function func(context.Context)
+func (m *meter) Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64UpDownCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-func (c *delegatedCallback) setDelegate(m metric.Meter) {
- insts := unwrapInstruments(c.instruments)
- err := m.RegisterCallback(insts, c.function)
- if err != nil {
- otel.Handle(err)
+func (m *meter) Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64Histogram(name, options...)
}
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfHistogram{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-type afInstProvider meter
-
-// Counter creates an instrument for recording increasing values.
-func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+func (m *meter) Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableUpDownCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-// Gauge creates an instrument for recording the current value.
-func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afGauge{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+func (m *meter) Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableGauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
}
-type aiInstProvider meter
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...instrument.Asynchronous) (metric.Registration, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ insts = unwrapInstruments(insts)
+ return del.RegisterCallback(f, insts...)
+ }
-// Counter creates an instrument for recording increasing values.
-func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+	reg := &registration{instruments: insts, function: f}
+ e := m.registry.PushBack(reg)
+ reg.unreg = func() error {
+ m.mtx.Lock()
+ _ = m.registry.Remove(e)
+ m.mtx.Unlock()
+ return nil
+ }
+ return reg, nil
}
-// Gauge creates an instrument for recording the current value.
-func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiGauge{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+type wrapped interface {
+ unwrap() instrument.Asynchronous
}
-type sfInstProvider meter
+func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
+ out := make([]instrument.Asynchronous, 0, len(instruments))
-// Counter creates an instrument for recording increasing values.
-func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
+ for _, inst := range instruments {
+ if in, ok := inst.(wrapped); ok {
+ out = append(out, in.unwrap())
+ } else {
+ out = append(out, inst)
+ }
+ }
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+ return out
}
-// Histogram creates an instrument for recording a distribution of values.
-func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfHistogram{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+type registration struct {
+ instruments []instrument.Asynchronous
+ function metric.Callback
+
+ unreg func() error
+ unregMu sync.Mutex
}
-type siInstProvider meter
+func (c *registration) setDelegate(m metric.Meter) {
+ insts := unwrapInstruments(c.instruments)
-// Counter creates an instrument for recording increasing values.
-func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+
+ if c.unreg == nil {
+ // Unregister already called.
+ return
+ }
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+ reg, err := m.RegisterCallback(c.function, insts...)
+ if err != nil {
+ otel.Handle(err)
+ }
+
+ c.unreg = reg.Unregister
}
-// Histogram creates an instrument for recording a distribution of values.
-func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siHistogram{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
+func (c *registration) Unregister() error {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+ if c.unreg == nil {
+ // Unregister already called.
+ return nil
+ }
+
+ var err error
+ err, c.unreg = c.unreg(), nil
+ return err
}
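registration.Unregister above is written to be idempotent and safe for concurrent use: the stored unreg func runs at most once and is then cleared under a mutex. A small standalone sketch of that guard (toy type, not the library's code):

```go
package main

import (
	"fmt"
	"sync"
)

// unregGuard mirrors the idempotent Unregister pattern: the stored func is
// invoked at most once, and later calls are no-ops.
type unregGuard struct {
	mu    sync.Mutex
	unreg func() error
}

func (g *unregGuard) Unregister() error {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.unreg == nil {
		return nil // already unregistered
	}
	var err error
	err, g.unreg = g.unreg(), nil
	return err
}

func main() {
	g := &unregGuard{unreg: func() error {
		fmt.Println("callback removed")
		return nil
	}}
	_ = g.Unregister() // prints once
	_ = g.Unregister() // no-op
}
```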
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/meter.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/meter.go
index 21fc1c499fb4..2f69d2ae54fd 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -17,44 +17,122 @@ package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// MeterProvider provides access to named Meter instances, for instrumenting
// an application or library.
+//
+// Warning: methods may be added to this interface in minor releases.
type MeterProvider interface {
- // Meter creates an instance of a `Meter` interface. The instrumentationName
- // must be the name of the library providing instrumentation. This name may
- // be the same as the instrumented code only if that code provides built-in
- // instrumentation. If the instrumentationName is empty, then a
- // implementation defined default name will be used instead.
- Meter(instrumentationName string, opts ...MeterOption) Meter
+ // Meter creates an instance of a `Meter` interface. The name must be the
+ // name of the library providing instrumentation. This name may be the same
+ // as the instrumented code only if that code provides built-in
+	// instrumentation. If the name is empty, then an implementation defined
+ // default name will be used instead.
+ Meter(name string, opts ...MeterOption) Meter
}
// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: methods may be added to this interface in minor releases.
type Meter interface {
- // AsyncInt64 is the namespace for the Asynchronous Integer instruments.
- //
- // To Observe data with instruments it must be registered in a callback.
- AsyncInt64() asyncint64.InstrumentProvider
+ // Int64Counter returns a new instrument identified by name and configured
+ // with options. The instrument is used to synchronously record increasing
+ // int64 measurements during a computational operation.
+ Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error)
+ // Int64UpDownCounter returns a new instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // int64 measurements during a computational operation.
+ Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error)
+ // Int64Histogram returns a new instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // the distribution of int64 measurements during a computational operation.
+ Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error)
+ // Int64ObservableCounter returns a new instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// increasing int64 measurements once per measurement collection cycle.
+ Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error)
+ // Int64ObservableUpDownCounter returns a new instrument identified by name
+ // and configured with options. The instrument is used to asynchronously
+ // record int64 measurements once per a measurement collection cycle.
+ Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error)
+ // Int64ObservableGauge returns a new instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// instantaneous int64 measurements once per measurement collection
+ // cycle.
+ Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error)
+
+ // Float64Counter returns a new instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // increasing float64 measurements during a computational operation.
+ Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error)
+ // Float64UpDownCounter returns a new instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // float64 measurements during a computational operation.
+ Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error)
+ // Float64Histogram returns a new instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // the distribution of float64 measurements during a computational
+ // operation.
+ Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error)
+ // Float64ObservableCounter returns a new instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// increasing float64 measurements once per measurement collection cycle.
+ Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error)
+ // Float64ObservableUpDownCounter returns a new instrument identified by
+ // name and configured with options. The instrument is used to
+	// asynchronously record float64 measurements once per measurement
+ // collection cycle.
+ Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error)
+ // Float64ObservableGauge returns a new instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// instantaneous float64 measurements once per measurement collection
+ // cycle.
+ Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error)
- // AsyncFloat64 is the namespace for the Asynchronous Float instruments
+ // RegisterCallback registers f to be called during the collection of a
+ // measurement cycle.
//
- // To Observe data with instruments it must be registered in a callback.
- AsyncFloat64() asyncfloat64.InstrumentProvider
+ // If Unregister of the returned Registration is called, f needs to be
+ // unregistered and not called during collection.
+ //
+ // The instruments f is registered with are the only instruments that f may
+ // observe values for.
+ //
+ // If no instruments are passed, f should not be registered nor called
+ // during collection.
+ RegisterCallback(f Callback, instruments ...instrument.Asynchronous) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. Meaning, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+type Observer interface {
+ // ObserveFloat64 records the float64 value with attributes for obsrv.
+ ObserveFloat64(obsrv instrument.Float64Observable, value float64, attributes ...attribute.KeyValue)
+ // ObserveInt64 records the int64 value with attributes for obsrv.
+ ObserveInt64(obsrv instrument.Int64Observable, value int64, attributes ...attribute.KeyValue)
+}
- // RegisterCallback captures the function that will be called during Collect.
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+type Registration interface {
+ // Unregister removes the callback registration from a Meter.
//
- // It is only valid to call Observe within the scope of the passed function,
- // and only on the instruments that were registered with this call.
- RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error
-
- // SyncInt64 is the namespace for the Synchronous Integer instruments
- SyncInt64() syncint64.InstrumentProvider
- // SyncFloat64 is the namespace for the Synchronous Float instruments
- SyncFloat64() syncfloat64.InstrumentProvider
+ // This method needs to be idempotent and concurrent safe.
+ Unregister() error
}
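Callers of the old AsyncInt64()/RegisterCallback([]instrument.Asynchronous, func(context.Context)) API migrate to the shape below. A hedged sketch assuming a configured metric.Meter, with a hypothetical instrument name: observable instruments are created directly on the Meter, and the callback receives an Observer and returns an error.

```go
package example

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// observeGoroutines registers a callback that reports the live goroutine
// count through an observable gauge.
func observeGoroutines(meter metric.Meter) (metric.Registration, error) {
	goroutines, err := meter.Int64ObservableGauge("process.goroutines",
		instrument.WithDescription("number of live goroutines"))
	if err != nil {
		return nil, err
	}
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
		return nil
	}, goroutines)
}
```

The returned Registration's Unregister removes the callback again; the global wrapper above guards that call to be idempotent.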
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop.go
index e8b9a9a14588..f38619e39abc 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/noop.go
@@ -19,10 +19,6 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// NewNoopMeterProvider creates a MeterProvider that does not record any metrics.
@@ -43,139 +39,105 @@ func NewNoopMeter() Meter {
type noopMeter struct{}
-// AsyncInt64 creates an instrument that does not record any metrics.
-func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider {
- return nonrecordingAsyncInt64Instrument{}
+func (noopMeter) Int64Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) {
+ return nonrecordingSyncInt64Instrument{}, nil
}
-// AsyncFloat64 creates an instrument that does not record any metrics.
-func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider {
- return nonrecordingAsyncFloat64Instrument{}
+func (noopMeter) Int64UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) {
+ return nonrecordingSyncInt64Instrument{}, nil
}
-// SyncInt64 creates an instrument that does not record any metrics.
-func (noopMeter) SyncInt64() syncint64.InstrumentProvider {
- return nonrecordingSyncInt64Instrument{}
+func (noopMeter) Int64Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) {
+ return nonrecordingSyncInt64Instrument{}, nil
}
-// SyncFloat64 creates an instrument that does not record any metrics.
-func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider {
- return nonrecordingSyncFloat64Instrument{}
+func (noopMeter) Int64ObservableCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) {
+ return nonrecordingAsyncInt64Instrument{}, nil
}
-// RegisterCallback creates a register callback that does not record any metrics.
-func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error {
- return nil
+func (noopMeter) Int64ObservableUpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) {
+ return nonrecordingAsyncInt64Instrument{}, nil
}
-type nonrecordingAsyncFloat64Instrument struct {
- instrument.Asynchronous
+func (noopMeter) Int64ObservableGauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) {
+ return nonrecordingAsyncInt64Instrument{}, nil
}
-var (
- _ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{}
-)
-
-func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) {
- return n, nil
+func (noopMeter) Float64Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) {
+ return nonrecordingSyncFloat64Instrument{}, nil
}
-func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
- return n, nil
+func (noopMeter) Float64UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) {
+ return nonrecordingSyncFloat64Instrument{}, nil
}
-func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) {
- return n, nil
+func (noopMeter) Float64Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) {
+ return nonrecordingSyncFloat64Instrument{}, nil
}
-func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) {
-
+func (noopMeter) Float64ObservableCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) {
+ return nonrecordingAsyncFloat64Instrument{}, nil
}
-type nonrecordingAsyncInt64Instrument struct {
- instrument.Asynchronous
+func (noopMeter) Float64ObservableUpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) {
+ return nonrecordingAsyncFloat64Instrument{}, nil
}
-var (
- _ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.Counter = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{}
-)
-
-func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) {
- return n, nil
+func (noopMeter) Float64ObservableGauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) {
+ return nonrecordingAsyncFloat64Instrument{}, nil
}
-func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) {
- return n, nil
+// RegisterCallback creates a register callback that does not record any metrics.
+func (noopMeter) RegisterCallback(Callback, ...instrument.Asynchronous) (Registration, error) {
+ return noopReg{}, nil
}
-func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) {
- return n, nil
-}
+type noopReg struct{}
-func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) {
-}
+func (noopReg) Unregister() error { return nil }
-type nonrecordingSyncFloat64Instrument struct {
- instrument.Synchronous
+type nonrecordingAsyncFloat64Instrument struct {
+ instrument.Float64Observable
}
var (
- _ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{}
+ _ instrument.Float64ObservableCounter = nonrecordingAsyncFloat64Instrument{}
+ _ instrument.Float64ObservableUpDownCounter = nonrecordingAsyncFloat64Instrument{}
+ _ instrument.Float64ObservableGauge = nonrecordingAsyncFloat64Instrument{}
)
-func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) {
- return n, nil
+type nonrecordingAsyncInt64Instrument struct {
+ instrument.Int64Observable
}
-func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {
+var (
+ _ instrument.Int64ObservableCounter = nonrecordingAsyncInt64Instrument{}
+ _ instrument.Int64ObservableUpDownCounter = nonrecordingAsyncInt64Instrument{}
+ _ instrument.Int64ObservableGauge = nonrecordingAsyncInt64Instrument{}
+)
+type nonrecordingSyncFloat64Instrument struct {
+ instrument.Synchronous
}
-func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {
+var (
+ _ instrument.Float64Counter = nonrecordingSyncFloat64Instrument{}
+ _ instrument.Float64UpDownCounter = nonrecordingSyncFloat64Instrument{}
+ _ instrument.Float64Histogram = nonrecordingSyncFloat64Instrument{}
+)
-}
+func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {}
+func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {}
type nonrecordingSyncInt64Instrument struct {
instrument.Synchronous
}
var (
- _ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{}
- _ syncint64.Counter = nonrecordingSyncInt64Instrument{}
- _ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{}
- _ syncint64.Histogram = nonrecordingSyncInt64Instrument{}
+ _ instrument.Int64Counter = nonrecordingSyncInt64Instrument{}
+ _ instrument.Int64UpDownCounter = nonrecordingSyncInt64Instrument{}
+ _ instrument.Int64Histogram = nonrecordingSyncInt64Instrument{}
)
-func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) {
- return n, nil
-}
-
-func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {
-}
-func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {
-}
+func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {}
+func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {}
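Since the noop meter now implements the new Meter surface directly, it can stand in as a throwaway test double; a minimal sketch (names are illustrative):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

// useNoop exercises an instrument from the noop meter; nothing is recorded.
func useNoop(ctx context.Context) error {
	meter := metric.NewNoopMeter()
	ctr, err := meter.Int64Counter("ignored")
	if err != nil {
		return err
	}
	ctr.Add(ctx, 1) // no-op
	return nil
}
```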
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index 7af46c61af09..aa0f942f490e 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type (
@@ -60,9 +60,9 @@ var (
func (telemetrySDK) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(
semconv.SchemaURL,
- semconv.TelemetrySDKNameKey.String("opentelemetry"),
- semconv.TelemetrySDKLanguageKey.String("go"),
- semconv.TelemetrySDKVersionKey.String(otel.Version()),
+ semconv.TelemetrySDKName("opentelemetry"),
+ semconv.TelemetrySDKLanguageGo,
+ semconv.TelemetrySDKVersion(otel.Version()),
), nil
}
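The semconv bump from v1.12.0 to v1.17.0 replaces the `Key.String(...)` call sites with generated constructor functions; both forms build the same attribute.KeyValue. A minimal sketch of that equivalence (the service name is only an example value):

    package main

    import (
        "fmt"

        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
    )

    func main() {
        // Constructor style used by the updated detectors above...
        a := semconv.ServiceName("cluster-autoscaler")
        // ...and the older Key-based style, still available on the *Key constants.
        b := semconv.ServiceNameKey.String("cluster-autoscaler")
        fmt.Println(a.Key == b.Key, a.Value.AsString() == b.Value.AsString()) // true true
    }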
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
index 8e212b121824..f9a2a2999074 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -194,6 +194,8 @@ func WithContainer() Option {
}
// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
func WithContainerID() Option {
return WithDetectors(cgroupContainerIDDetector{})
}
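For ECS workloads the new doc note points at the contrib detector instead of WithContainerID. A minimal sketch of wiring it in, assuming the contrib module's NewResourceDetector constructor (that package is not part of this vendored tree):

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/contrib/detectors/aws/ecs"
        "go.opentelemetry.io/otel/sdk/resource"
    )

    func main() {
        // Prefer the ECS detector over WithContainerID when running on ECS.
        res, err := resource.New(context.Background(),
            resource.WithDetectors(ecs.NewResourceDetector()))
        if err != nil {
            panic(err)
        }
        fmt.Println(res.Attributes())
    }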
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 7a897e969777..318dcf82fe29 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -22,7 +22,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type containerIDProvider func() (string, error)
@@ -47,7 +47,7 @@ func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error)
if containerID == "" {
return Empty(), nil
}
- return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
}
var (
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index eb22d007922f..e32843cad14a 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -17,11 +17,13 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"context"
"fmt"
+ "net/url"
"os"
"strings"
+ "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
const (
@@ -57,7 +59,7 @@ func (fromEnv) Detect(context.Context) (*Resource, error) {
var res *Resource
if svcName != "" {
- res = NewSchemaless(semconv.ServiceNameKey.String(svcName))
+ res = NewSchemaless(semconv.ServiceName(svcName))
}
r2, err := constructOTResources(attrs)
@@ -88,7 +90,14 @@ func constructOTResources(s string) (*Resource, error) {
invalid = append(invalid, p)
continue
}
- k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1])
+ k := strings.TrimSpace(field[0])
+ v, err := url.QueryUnescape(strings.TrimSpace(field[1]))
+ if err != nil {
+ // Retain original value if decoding fails, otherwise it will be
+ // an empty string.
+ v = field[1]
+ otel.Handle(err)
+ }
attrs = append(attrs, attribute.String(k, v))
}
var err error
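The added url.QueryUnescape call means values in OTEL_RESOURCE_ATTRIBUTES may now be percent-encoded. A stdlib-only sketch of the decoding behaviour (the attribute values are made up for illustration):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        raw := "service.name=checkout,service.version=1.2.3%20beta"
        for _, pair := range strings.Split(raw, ",") {
            kv := strings.SplitN(pair, "=", 2)
            v, err := url.QueryUnescape(strings.TrimSpace(kv[1]))
            if err != nil {
                v = kv[1] // keep the raw value if decoding fails, as the detector does
            }
            fmt.Printf("%s=%s\n", strings.TrimSpace(kv[0]), v) // service.version=1.2.3 beta
        }
    }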
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 3b4d0c14dbd5..815fe5c20410 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type osDescriptionProvider func() (string, error)
@@ -63,7 +63,7 @@ func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(
semconv.SchemaURL,
- semconv.OSDescriptionKey.String(description),
+ semconv.OSDescription(description),
), nil
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index 9a169f663fb5..bdd0e7fe6803 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)
type pidProvider func() int
@@ -120,14 +120,14 @@ type processRuntimeDescriptionDetector struct{}
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
}
// Detect returns a *Resource that describes the name of the process executable.
func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
executableName := filepath.Base(commandArgs()[0])
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
}
// Detect returns a *Resource that describes the full path of the process executable.
@@ -137,13 +137,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
return nil, err
}
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
}
// Detect returns a *Resource that describes all the command arguments as received
// by the process.
func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
}
// Detect returns a *Resource that describes the username of the user that owns the
@@ -154,18 +154,18 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
return nil, err
}
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
}
// Detect returns a *Resource that describes the name of the compiler used to compile
// this process image.
func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
}
// Detect returns a *Resource that describes the version of the runtime of this process.
func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
}
// Detect returns a *Resource that describes the runtime of this process.
@@ -175,6 +175,6 @@ func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource,
return NewWithAttributes(
semconv.SchemaURL,
- semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription),
+ semconv.ProcessRuntimeDescription(runtimeDescription),
), nil
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 292ea5481bc9..201c17817004 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -76,6 +76,7 @@ type TracerProvider struct {
mu sync.Mutex
namedTracer map[instrumentation.Scope]*tracer
spanProcessors atomic.Value
+ isShutdown bool
// These fields are not protected by the lock mu. They are assumed to be
// immutable after creation of the TracerProvider.
@@ -116,12 +117,13 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
spanLimits: o.spanLimits,
resource: o.resource,
}
-
global.Info("TracerProvider created", "config", o)
+ spss := spanProcessorStates{}
for _, sp := range o.processors {
- tp.RegisterSpanProcessor(sp)
+ spss = append(spss, newSpanProcessorState(sp))
}
+ tp.spanProcessors.Store(spss)
return tp
}
@@ -159,44 +161,44 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
}
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
-func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) {
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
p.mu.Lock()
defer p.mu.Unlock()
- newSPS := spanProcessorStates{}
- if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok {
- newSPS = append(newSPS, old...)
- }
- newSpanSync := &spanProcessorState{
- sp: s,
- state: &sync.Once{},
+ if p.isShutdown {
+ return
}
- newSPS = append(newSPS, newSpanSync)
+ newSPS := spanProcessorStates{}
+ newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...)
+ newSPS = append(newSPS, newSpanProcessorState(sp))
p.spanProcessors.Store(newSPS)
}
// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
-func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) {
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
p.mu.Lock()
defer p.mu.Unlock()
- spss := spanProcessorStates{}
- old, ok := p.spanProcessors.Load().(spanProcessorStates)
- if !ok || len(old) == 0 {
+ if p.isShutdown {
return
}
+ old := p.spanProcessors.Load().(spanProcessorStates)
+ if len(old) == 0 {
+ return
+ }
+ spss := spanProcessorStates{}
spss = append(spss, old...)
// stop the span processor if it is started and remove it from the list
var stopOnce *spanProcessorState
var idx int
for i, sps := range spss {
- if sps.sp == s {
+ if sps.sp == sp {
stopOnce = sps
idx = i
}
}
if stopOnce != nil {
stopOnce.state.Do(func() {
- if err := s.Shutdown(context.Background()); err != nil {
+ if err := sp.Shutdown(context.Background()); err != nil {
otel.Handle(err)
}
})
@@ -213,10 +215,7 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) {
// ForceFlush immediately exports all spans that have not yet been exported for
// all the registered span processors.
func (p *TracerProvider) ForceFlush(ctx context.Context) error {
- spss, ok := p.spanProcessors.Load().(spanProcessorStates)
- if !ok {
- return fmt.Errorf("failed to load span processors")
- }
+ spss := p.spanProcessors.Load().(spanProcessorStates)
if len(spss) == 0 {
return nil
}
@@ -235,12 +234,18 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error {
return nil
}
-// Shutdown shuts down the span processors in the order they were registered.
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
func (p *TracerProvider) Shutdown(ctx context.Context) error {
- spss, ok := p.spanProcessors.Load().(spanProcessorStates)
- if !ok {
- return fmt.Errorf("failed to load span processors")
+ spss := p.spanProcessors.Load().(spanProcessorStates)
+ if len(spss) == 0 {
+ return nil
}
+
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.isShutdown = true
+
var retErr error
for _, sps := range spss {
select {
@@ -262,6 +267,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
}
}
}
+ p.spanProcessors.Store(spanProcessorStates{})
return retErr
}
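With the new isShutdown flag, registering a processor after Shutdown becomes a no-op and the stored processor list is cleared. A minimal sketch of that lifecycle, using a throwaway no-op processor (the type is illustrative only):

    package main

    import (
        "context"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    // noopProcessor is a stand-in span processor used only for illustration.
    type noopProcessor struct{}

    func (noopProcessor) OnStart(context.Context, sdktrace.ReadWriteSpan) {}
    func (noopProcessor) OnEnd(sdktrace.ReadOnlySpan)                     {}
    func (noopProcessor) Shutdown(context.Context) error                  { return nil }
    func (noopProcessor) ForceFlush(context.Context) error                { return nil }

    func main() {
        tp := sdktrace.NewTracerProvider()
        tp.RegisterSpanProcessor(noopProcessor{})
        _ = tp.Shutdown(context.Background())     // shuts processors down, marks the provider
        tp.RegisterSpanProcessor(noopProcessor{}) // ignored after Shutdown with this change
    }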
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index a6dcf4b307c1..5ee9715d27bb 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -81,7 +81,7 @@ type traceIDRatioSampler struct {
func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
psc := trace.SpanContextFromContext(p.ParentContext)
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
+ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
if x < ts.traceIDUpperBound {
return SamplingResult{
Decision: RecordAndSample,
@@ -163,10 +163,10 @@ func NeverSample() Sampler {
// the root(Sampler) is used to make sampling decision. If the span has
// a parent, depending on whether the parent is remote and whether it
// is sampled, one of the following samplers will apply:
-// - remoteParentSampled(Sampler) (default: AlwaysOn)
-// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
-// - localParentSampled(Sampler) (default: AlwaysOn)
-// - localParentNotSampled(Sampler) (default: AlwaysOff)
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
return parentBased{
root: root,
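The sampler fix above makes TraceIDRatioBased read the lower eight bytes of the trace ID; the ParentBased defaults are unchanged. A minimal sketch of the usual composition:

    package main

    import (
        "context"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        // Root spans are sampled for ~25% of trace IDs; children follow their
        // parent's decision via the defaults listed in the ParentBased comment.
        tp := sdktrace.NewTracerProvider(
            sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
        )
        defer func() { _ = tp.Shutdown(context.Background()) }()
    }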
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 449cf6c2552d..9fb483a99feb 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
@@ -189,15 +189,18 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
if !s.IsRecording() {
return
}
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.status.Code > code {
+ return
+ }
status := Status{Code: code}
if code == codes.Error {
status.Description = description
}
- s.mu.Lock()
s.status = status
- s.mu.Unlock()
}
// SetAttributes sets attributes of this span.
@@ -310,26 +313,13 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
return attr.Key.String(safeTruncate(v, limit))
}
case attribute.STRINGSLICE:
- // Do no mutate the original, make a copy.
- trucated := attr.Key.StringSlice(attr.Value.AsStringSlice())
- // Do not do this.
- //
- // v := trucated.Value.AsStringSlice()
- // cp := make([]string, len(v))
- // /* Copy and truncate values to cp ... */
- // trucated.Value = attribute.StringSliceValue(cp)
- //
- // Copying the []string and then assigning it back as a new value with
- // attribute.StringSliceValue will copy the data twice. Instead, we
- // already made a copy above that only this function owns, update the
- // underlying slice data of our copy.
- v := trucated.Value.AsStringSlice()
+ v := attr.Value.AsStringSlice()
for i := range v {
if len(v[i]) > limit {
v[i] = safeTruncate(v[i], limit)
}
}
- return trucated
+ return attr.Key.StringSlice(v)
}
return attr
}
@@ -393,14 +383,14 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
defer panic(recovered)
opts := []trace.EventOption{
trace.WithAttributes(
- semconv.ExceptionTypeKey.String(typeStr(recovered)),
- semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)),
+ semconv.ExceptionType(typeStr(recovered)),
+ semconv.ExceptionMessage(fmt.Sprint(recovered)),
),
}
if config.StackTrace() {
opts = append(opts, trace.WithAttributes(
- semconv.ExceptionStacktraceKey.String(recordStackTrace()),
+ semconv.ExceptionStacktrace(recordStackTrace()),
))
}
@@ -420,14 +410,13 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
- if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok {
- if len(sps) == 0 {
- return
- }
- snap := s.snapshot()
- for _, sp := range sps {
- sp.sp.OnEnd(snap)
- }
+ sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates)
+ if len(sps) == 0 {
+ return
+ }
+ snap := s.snapshot()
+ for _, sp := range sps {
+ sp.sp.OnEnd(snap)
}
}
@@ -441,14 +430,14 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
}
opts = append(opts, trace.WithAttributes(
- semconv.ExceptionTypeKey.String(typeStr(err)),
- semconv.ExceptionMessageKey.String(err.Error()),
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
))
c := trace.NewEventConfig(opts...)
if c.StackTrace() {
opts = append(opts, trace.WithAttributes(
- semconv.ExceptionStacktraceKey.String(recordStackTrace()),
+ semconv.ExceptionStacktrace(recordStackTrace()),
))
}
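RecordError and the panic handler in End now emit the exception event attributes through the v1.17.0 constructor helpers; nothing changes at the API surface. A short sketch of the call site this feeds (tracer name and error are placeholders):

    package main

    import (
        "context"
        "errors"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/trace"
    )

    func doWork(ctx context.Context) {
        _, span := otel.Tracer("example").Start(ctx, "doWork")
        defer span.End()

        if err := errors.New("boom"); err != nil {
            // Recorded as an "exception" span event; its attributes come from the
            // v1.17.0 helpers (ExceptionType, ExceptionMessage, ExceptionStacktrace).
            span.RecordError(err, trace.WithStackTrace(true))
        }
    }

    func main() { doWork(context.Background()) }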
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
index b649a2ff049f..e6ae19352195 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -64,4 +64,9 @@ type spanProcessorState struct {
sp SpanProcessor
state *sync.Once
}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+ return &spanProcessorState{sp: sp, state: &sync.Once{}}
+}
+
type spanProcessorStates []*spanProcessorState
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 7b11fc465c69..f17d924b89e3 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -51,7 +51,7 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS
s := tr.newSpan(ctx, name, &config)
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
- sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates)
+ sps := tr.provider.spanProcessors.Load().(spanProcessorStates)
for _, sp := range sps {
sp.sp.OnStart(ctx, rw)
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
new file mode 100644
index 000000000000..12d6b520f528
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
@@ -0,0 +1,404 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+)
+
+// HTTPConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type HTTPConv struct {
+ NetConv *NetConv
+
+ EnduserIDKey attribute.Key
+ HTTPClientIPKey attribute.Key
+ HTTPFlavorKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ HTTPUserAgentKey attribute.Key
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated by combining these
+// with attributes from the request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ n := 3 // URL, peer name, proto, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.proto(req.Proto))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ // TODO: This currently does not add the specification required
+ // `http.target` attribute. It has too high of a cardinality to safely be
+ // added. An alternate should be added, or this comment removed, when it is
+ // addressed by the specification. If it is ultimately decided to continue
+ // not including the attribute, the HTTPTargetKey field of the HTTPConv
+ // should be removed as well.
+
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.proto(req.Proto))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ return attrs
+}
+
+func (c *HTTPConv) method(method string) attribute.KeyValue {
+ if method == "" {
+ return c.HTTPMethodKey.String(http.MethodGet)
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return c.HTTPSchemeHTTPS
+ }
+ return c.HTTPSchemeHTTP
+}
+
+func (c *HTTPConv) proto(proto string) attribute.KeyValue {
+ switch proto {
+ case "HTTP/1.0":
+ return c.HTTPFlavorKey.String("1.0")
+ case "HTTP/1.1":
+ return c.HTTPFlavorKey.String("1.1")
+ case "HTTP/2":
+ return c.HTTPFlavorKey.String("2.0")
+ case "HTTP/3":
+ return c.HTTPFlavorKey.String("3.0")
+ default:
+ return c.HTTPFlavorKey.String(proto)
+ }
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+// Return the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+ for _, hostport := range source {
+ host, port = splitHostPort(hostport)
+ if host != "" || port > 0 {
+ break
+ }
+ }
+ return
+}
+
+// RequestHeader returns the contents of h as OpenTelemetry attributes.
+func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue {
+ return c.header("http.request.header", h)
+}
+
+// ResponseHeader returns the contents of h as OpenTelemetry attributes.
+func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue {
+ return c.header("http.response.header", h)
+}
+
+func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue {
+ key := func(k string) attribute.Key {
+ k = strings.ToLower(k)
+ k = strings.ReplaceAll(k, "-", "_")
+ k = fmt.Sprintf("%s.%s", prefix, k)
+ return attribute.Key(k)
+ }
+
+ attrs := make([]attribute.KeyValue, 0, len(h))
+ for k, v := range h {
+ attrs = append(attrs, key(k).StringSlice(v))
+ }
+ return attrs
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) {
+ stat, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ return stat, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) {
+ stat, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+
+ if code/100 == 4 {
+ return codes.Unset, ""
+ }
+ return stat, ""
+}
+
+type codeRange struct {
+ fromInclusive int
+ toInclusive int
+}
+
+func (r codeRange) contains(code int) bool {
+ return r.fromInclusive <= code && code <= r.toInclusive
+}
+
+var validRangesPerCategory = map[int][]codeRange{
+ 1: {
+ {http.StatusContinue, http.StatusEarlyHints},
+ },
+ 2: {
+ {http.StatusOK, http.StatusAlreadyReported},
+ {http.StatusIMUsed, http.StatusIMUsed},
+ },
+ 3: {
+ {http.StatusMultipleChoices, http.StatusUseProxy},
+ {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
+ },
+ 4: {
+ {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
+ {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
+ {http.StatusPreconditionRequired, http.StatusTooManyRequests},
+ {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
+ {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
+ },
+ 5: {
+ {http.StatusInternalServerError, http.StatusLoopDetected},
+ {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
+ },
+}
+
+// validateHTTPStatusCode validates the HTTP status code and returns
+// corresponding span status code. If the `code` is not a valid HTTP status
+// code, returns span status Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+ category := code / 100
+ ranges, ok := validRangesPerCategory[category]
+ if !ok {
+ return codes.Error, false
+ }
+ ok = false
+ for _, crange := range ranges {
+ ok = crange.contains(code)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ return codes.Error, false
+ }
+ if category > 0 && category < 4 {
+ return codes.Unset, true
+ }
+ return codes.Error, true
+}
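The header helper above lowercases names, swaps dashes for underscores and adds the prefix, which is worth keeping in mind when configuring header capture. The transformation, sketched with the standard library only:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // "Content-Type" becomes the attribute key printed below.
        k := "Content-Type"
        key := "http.request.header." + strings.ReplaceAll(strings.ToLower(k), "-", "_")
        fmt.Println(key) // http.request.header.content_type
    }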
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
new file mode 100644
index 000000000000..4a711133a026
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
@@ -0,0 +1,324 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// NetConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type NetConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+func (c *NetConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *NetConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(int(p)))
+ }
+ return attrs
+}
+
+// Server returns attributes for a network listener listening at address. See
+// net.Listen for information about acceptable address values; address should
+// be the same as the one used to create ln. If ln is nil, only network host
+// attributes will be returned that describe address. Otherwise, the socket
+// level information about ln will also be included.
+func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue {
+ if ln == nil {
+ return c.Host(address)
+ }
+
+ lAddr := ln.Addr()
+ if lAddr == nil {
+ return c.Host(address)
+ }
+
+ hostName, hostPort := splitHostPort(address)
+ sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
+ network := lAddr.Network()
+ sockFamily := family(network, sockHostAddr)
+
+ n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
+ n += positiveInt(hostPort, sockHostPort)
+ attr := make([]attribute.KeyValue, 0, n)
+ if hostName != "" {
+ attr = append(attr, c.HostName(hostName))
+ if hostPort > 0 {
+ // Only if net.host.name is set should net.host.port be.
+ attr = append(attr, c.HostPort(hostPort))
+ }
+ }
+ if network != "" {
+ attr = append(attr, c.Transport(network))
+ }
+ if sockFamily != "" {
+ attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+ }
+ if sockHostAddr != "" {
+ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+ if sockHostPort > 0 {
+ // Only if net.sock.host.addr is set should net.sock.host.port be.
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+ }
+ }
+ return attr
+}
+
+func (c *NetConv) HostName(name string) attribute.KeyValue {
+ return c.NetHostNameKey.String(name)
+}
+
+func (c *NetConv) HostPort(port int) attribute.KeyValue {
+ return c.NetHostPortKey.Int(port)
+}
+
+// Client returns attributes for a client network connection to address. See
+// net.Dial for information about acceptable address values; address should be
+// the same as the one used to create conn. If conn is nil, only network peer
+// attributes will be returned that describe address. Otherwise, the socket
+// level information about conn will also be included.
+func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue {
+ if conn == nil {
+ return c.Peer(address)
+ }
+
+ lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
+
+ var network string
+ switch {
+ case lAddr != nil:
+ network = lAddr.Network()
+ case rAddr != nil:
+ network = rAddr.Network()
+ default:
+ return c.Peer(address)
+ }
+
+ peerName, peerPort := splitHostPort(address)
+ var (
+ sockFamily string
+ sockPeerAddr string
+ sockPeerPort int
+ sockHostAddr string
+ sockHostPort int
+ )
+
+ if lAddr != nil {
+ sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
+ }
+
+ if rAddr != nil {
+ sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
+ }
+
+ switch {
+ case sockHostAddr != "":
+ sockFamily = family(network, sockHostAddr)
+ case sockPeerAddr != "":
+ sockFamily = family(network, sockPeerAddr)
+ }
+
+ n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
+ n += positiveInt(peerPort, sockPeerPort, sockHostPort)
+ attr := make([]attribute.KeyValue, 0, n)
+ if peerName != "" {
+ attr = append(attr, c.PeerName(peerName))
+ if peerPort > 0 {
+ // Only if net.peer.name is set should net.peer.port be.
+ attr = append(attr, c.PeerPort(peerPort))
+ }
+ }
+ if network != "" {
+ attr = append(attr, c.Transport(network))
+ }
+ if sockFamily != "" {
+ attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+ }
+ if sockPeerAddr != "" {
+ attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
+ if sockPeerPort > 0 {
+ // Only if net.sock.peer.addr is set should net.sock.peer.port be.
+ attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
+ }
+ }
+ if sockHostAddr != "" {
+ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+ if sockHostPort > 0 {
+ // Only if net.sock.host.addr is set should net.sock.host.port be.
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+ }
+ }
+ return attr
+}
+
+func family(network, address string) string {
+ switch network {
+ case "unix", "unixgram", "unixpacket":
+ return "unix"
+ default:
+ if ip := net.ParseIP(address); ip != nil {
+ if ip.To4() == nil {
+ return "inet6"
+ }
+ return "inet"
+ }
+ }
+ return ""
+}
+
+func nonZeroStr(strs ...string) int {
+ var n int
+ for _, str := range strs {
+ if str != "" {
+ n++
+ }
+ }
+ return n
+}
+
+func positiveInt(ints ...int) int {
+ var n int
+ for _, i := range ints {
+ if i > 0 {
+ n++
+ }
+ }
+ return n
+}
+
+// Peer returns attributes for a network peer address.
+func (c *NetConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *NetConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *NetConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *NetConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
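splitHostPort exists because net.SplitHostPort alone rejects a bare host with no port; the helper instead keeps the host and reports -1 for the port. A stdlib sketch of the difference:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // net.SplitHostPort errors on input without a port, which is why the
        // helper above falls back to returning the whole string as the host.
        _, _, err := net.SplitHostPort("example.com")
        fmt.Println(err != nil) // true: "missing port in address"

        h, p, _ := net.SplitHostPort("[2001:db8::1]:443")
        fmt.Println(h, p) // 2001:db8::1 443
    }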
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 000000000000..71a1f7748d55
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 000000000000..679c40c4de49
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`, one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be calculated
+// as two different counters starting from `1`, one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
similarity index 70%
rename from cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
rename to cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
index 647d77302de2..9b8c559de427 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -12,14 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package unit // import "go.opentelemetry.io/otel/metric/unit"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-// Unit is a determinate standard quantity of measurement.
-type Unit string
-
-// Units defined by OpenTelemetry.
const (
- Dimensionless Unit = "1"
- Bytes Unit = "By"
- Milliseconds Unit = "ms"
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
)
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
similarity index 65%
rename from cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
rename to cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
index f8e723593e61..d5c4b5c136aa 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -12,9 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package unit provides units.
-//
-// This package is currently in a pre-GA phase. Backwards incompatible changes
-// may be introduced in subsequent minor version releases as we work to track
-// the evolving OpenTelemetry specification and user feedback.
-package unit // import "go.opentelemetry.io/otel/metric/unit"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
new file mode 100644
index 000000000000..c60b2a6bb68a
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httpconv provides OpenTelemetry semantic conventions for the net/http
+// package from the standard library.
+package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/semconv/internal/v2"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+var (
+ nc = &internal.NetConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+ }
+
+ hc = &internal.HTTPConv{
+ NetConv: nc,
+
+ EnduserIDKey: semconv.EnduserIDKey,
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPFlavorKey: semconv.HTTPFlavorKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ HTTPUserAgentKey: semconv.HTTPUserAgentKey,
+ }
+)
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. It will return the following attributes if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated by combining these
+// with attributes from the request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func ClientResponse(resp *http.Response) []attribute.KeyValue {
+ return hc.ClientResponse(resp)
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func ClientRequest(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequest(req)
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func ClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequest(server, req)
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// RequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func RequestHeader(h http.Header) []attribute.KeyValue {
+ return hc.RequestHeader(h)
+}
+
+// ResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func ResponseHeader(h http.Header) []attribute.KeyValue {
+ return hc.ResponseHeader(h)
+}
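
A minimal usage sketch for the wrappers above, assuming the vendored package path go.opentelemetry.io/otel/semconv/v1.17.0/httpconv; the tracer name and the tracedDo helper are illustrative only, not part of this package:

package example

import (
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

// tracedDo performs req inside a client span annotated with the
// semantic-convention attributes produced by this package.
func tracedDo(client *http.Client, req *http.Request) (*http.Response, error) {
	ctx, span := otel.Tracer("example/httpclient").Start(req.Context(), req.Method,
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(httpconv.ClientRequest(req)...))
	defer span.End()

	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		span.RecordError(err)
		return nil, err
	}
	// Response attributes plus a span status derived from the HTTP status code.
	span.SetAttributes(httpconv.ClientResponse(resp)...)
	span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
	return resp, nil
}
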
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 000000000000..39a2eab3a6a9
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,2010 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserUserAgentKey is the attribute Key conforming to the
+ // "browser.user_agent" semantic conventions. It represents the full
+ // user-agent string provided by the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+ // AppleWebKit/537.36 (KHTML, '
+ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do
+ // not have a mechanism to retrieve brands and platform individually from
+ // the User-Agent Client Hints API. To retrieve the value, the legacy
+ // `navigator.userAgent` API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+ return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+	// This attribute represents the zone where the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
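
A short sketch of how the cloud attribute constructors above attach to an SDK resource; resource.NewWithAttributes is assumed to come from go.opentelemetry.io/otel/sdk/resource, SchemaURL is defined elsewhere in this semconv package, and the account/region/zone values are the documentation examples, not real values:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func cloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,    // enum value, used directly
		semconv.CloudPlatformAWSEKS, // cloud.platform
		semconv.CloudAccountID("111111111111"),
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
	)
}
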
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+ // conventions. It represents the unique ID of the single function that
+ // this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+ //
+ // The exact value to use for `faas.id` depends on the cloud provider:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+ return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
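
The FaaS attributes follow the same pattern; a sketch for an AWS Lambda function using the documentation examples above (the ARN, version, and memory values are those examples, not real values):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func lambdaResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.FaaSName("my-function"),
		// Per the faas.id note: use the function ARN with the resolved
		// version, not an alias-suffixed "invoked ARN".
		semconv.FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
		semconv.FaaSVersion("26"),
		semconv.FaaSMaxMemory(128),
	)
}
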
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // Linux systems, the `machine-id` located in `/etc/machine-id` or
+ // `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+	// runtime usually uses a different, globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses a different, globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
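
A sketch for the Kubernetes pod/container attributes above; reading the values from downward-API environment variables is one common approach, and the variable names used here are placeholders rather than a fixed convention:

package example

import (
	"os"

	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func k8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")), // placeholder env var
		semconv.K8SPodName(os.Getenv("POD_NAME")),            // placeholder env var
		semconv.K8SPodUID(os.Getenv("POD_UID")),              // placeholder env var
		semconv.K8SContainerName("redis"),
		semconv.K8SContainerRestartCount(0),
	)
}
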
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, e.g. as reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
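+
+// exampleOSResourceAttrs is an illustrative sketch only, not part of the
+// generated conventions: it combines the required os.type enum member with
+// the optional os.* helpers defined above. The version strings are
+// hypothetical.
+func exampleOSResourceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  OSTypeLinux,
+  OSName("Ubuntu"),
+  OSVersion("18.04.1"),
+  OSDescription("Ubuntu 18.04.1 LTS"),
+ }
+}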
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string. On Windows, can
+ // be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
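+
+// exampleProcessAttrs is an illustrative sketch only, not part of the
+// generated conventions: it populates the process.* helpers above with
+// hypothetical values. Note that ProcessCommandArgs is variadic and produces
+// a string-slice attribute.
+func exampleProcessAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ProcessPID(1234),
+  ProcessParentPID(1),
+  ProcessExecutableName("otelcol"),
+  ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+  ProcessOwner("root"),
+ }
+}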
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
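+
+// exampleGoRuntimeAttrs is an illustrative sketch only, not part of the
+// generated conventions: for a Go process, runtime.Version() from the
+// standard library is a natural source for process.runtime.version; literal
+// strings are used here to keep the sketch free of extra imports.
+func exampleGoRuntimeAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ProcessRuntimeName("go"),
+  ProcessRuntimeVersion("go1.20"),
+  ProcessRuntimeDescription("go version go1.20 linux/amd64"),
+ }
+}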
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
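+
+// exampleServiceAttrs is an illustrative sketch only, not part of the
+// generated conventions: service.name is the only required attribute of this
+// group; the namespace, instance ID and version shown are hypothetical.
+func exampleServiceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ServiceName("shoppingcart"),
+  ServiceNamespace("Shop"),
+  ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+  ServiceVersion("2.0.0"),
+ }
+}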
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
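+
+// exampleTelemetrySDKAttrs is an illustrative sketch only, not part of the
+// generated conventions: SDKs normally populate these values themselves; the
+// version string shown is hypothetical.
+func exampleTelemetrySDKAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  TelemetrySDKName("opentelemetry"),
+  TelemetrySDKLanguageGo,
+  TelemetrySDKVersion("1.14.0"),
+ }
+}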
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
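+
+// exampleWebEngineAttrs is an illustrative sketch only, not part of the
+// generated conventions: the WildFly values simply mirror the examples
+// documented above.
+func exampleWebEngineAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  WebEngineName("WildFly"),
+  WebEngineVersion("21.0.0"),
+  WebEngineDescription("WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final"),
+ }
+}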
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
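+
+// exampleScopeAttrs is an illustrative sketch only, not part of the generated
+// conventions: a non-OTLP exporter might attach these to an exported span to
+// carry its instrumentation scope; the scope name shown is hypothetical.
+func exampleScopeAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  OtelScopeName("io.opentelemetry.contrib.mongodb"),
+  OtelScopeVersion("1.0.0"),
+ }
+}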
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 000000000000..42fc525d1657
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 000000000000..8c4a7299d27b
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3375 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
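+
+// exampleExceptionAttrs is an illustrative sketch only, not part of the
+// generated conventions: exception attributes are typically attached to an
+// "exception" span event rather than to the span itself. The type string
+// shown is hypothetical; the message is taken from the supplied error.
+func exampleExceptionAttrs(err error) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ExceptionType("*os.PathError"),
+  ExceptionMessage(err.Error()),
+ }
+}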
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
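+
+// exampleEventAttrs is an illustrative sketch only, not part of the generated
+// conventions: event.name and event.domain are both required, and unrelated
+// events in different domains may reuse the same name.
+func exampleEventAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  EventName("click"),
+  EventDomainBrowser,
+ }
+}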
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, when applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, when applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
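+
+// exampleCloudEventAttrs is an illustrative sketch only, not part of the
+// generated conventions: the ID, source, spec version and type mirror the
+// examples documented above and are hypothetical.
+func exampleCloudEventAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+  CloudeventsEventSource("https://github.com/cloudevents"),
+  CloudeventsEventSpecVersion("1.0"),
+  CloudeventsEventType("com.github.pull_request.opened"),
+ }
+}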
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
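+
+// examplePostgresCallAttrs is an illustrative sketch only, not part of the
+// generated conventions: a database client span for a PostgreSQL query might
+// combine the required db.system enum member with the call-level helpers
+// above. The database, user and statement shown are hypothetical.
+func examplePostgresCallAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemPostgreSQL,
+  DBName("customers"),
+  DBUser("readonly_user"),
+  DBStatement("SELECT * FROM orders WHERE id = $1"),
+  DBOperation("SELECT"),
+ }
+}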
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
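+
+// exampleCassandraCallAttrs is an illustrative sketch only, not part of the
+// generated conventions: it combines the Cassandra call-level helpers above
+// with a consistency-level enum member. All values shown are hypothetical.
+func exampleCassandraCallAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemCassandra,
+  DBCassandraTable("mykeyspace.mytable"),
+  DBCassandraPageSize(5000),
+  DBCassandraConsistencyLevelLocalQuorum,
+  DBCassandraIdempotence(true),
+ }
+}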
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
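+// Illustrative sketch (not part of the generated conventions): a Redis
+// client instrumentation that issued `SELECT 2` before the traced command
+// could record the non-default database index on its span:
+//
+//	span.SetAttributes(DBRedisDBIndex(2))
+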
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
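+// Illustrative sketch (not part of the generated conventions): when the
+// instrumented SQL library exposes the primary table directly, it can be
+// attached to the span rather than parsed out of `db.statement`, assuming a
+// trace.Span named span is in scope:
+//
+//	span.SetAttributes(DBSQLTable("public.users"))
+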
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OtelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OtelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OtelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+ return OtelStatusDescriptionKey.String(val)
+}
+
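+// Illustrative sketch (not part of the generated conventions): a non-OTLP
+// exporter translating a span that ended with an error status could emit
+// these attributes in place of the native status fields:
+//
+//	attrs := []attribute.KeyValue{
+//		OtelStatusCodeError,
+//		OtelStatusDescription("resource not found"),
+//	}
+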
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+// trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+ // semantic conventions. It represents the execution ID of the current
+ // function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+ return FaaSExecutionKey.String(val)
+}
+
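+// Illustrative sketch (not part of the generated conventions): an incoming
+// FaaS server span could carry its trigger type and the invocation's
+// execution ID (the ID value here is an assumption for the example):
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)
+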
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+// FaaSDocumentOperationKey is the attribute Key conforming to the
+// "faas.document.operation" semantic conventions. It represents the type of
+// the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
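+// Illustrative sketch (not part of the generated conventions): a function
+// triggered by an S3 object creation could describe the triggering operation
+// with the document attributes above (values are assumptions for the example):
+//
+//	span.SetAttributes(
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)
+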
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
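+// Illustrative sketch (not part of the generated conventions): an incoming
+// FaaS span can flag the first invocation of a fresh instance, assuming the
+// wrapper tracks that state in a variable such as isFirstInvocation
+// (hypothetical name):
+//
+//	span.SetAttributes(FaaSColdstart(isFirstInvocation))
+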
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
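+// Illustrative sketch (not part of the generated conventions): a client span
+// around an outgoing function invocation could carry the invoked function's
+// identity, mirroring that function's resource attributes:
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)
+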
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetAppProtocolNameKey is the attribute Key conforming to the
+ // "net.app.protocol.name" semantic conventions. It represents the
+ // application layer protocol used. The value SHOULD be normalized to
+ // lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+
+ // NetAppProtocolVersionKey is the attribute Key conforming to the
+ // "net.app.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.app.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+// NetHostConnectionSubtypeKey is the attribute Key conforming to the
+// "net.host.connection.subtype" semantic conventions. It represents more
+// details about the connection type. It may be the type of cell technology
+// connection, but it could be used for describing details about a wifi
+// connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetAppProtocolName returns an attribute KeyValue conforming to the
+// "net.app.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetAppProtocolName(val string) attribute.KeyValue {
+ return NetAppProtocolNameKey.String(val)
+}
+
+// NetAppProtocolVersion returns an attribute KeyValue conforming to the
+// "net.app.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetAppProtocolVersion(val string) attribute.KeyValue {
+ return NetAppProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
+
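+// Illustrative sketch (not part of the generated conventions): a client
+// instrumentation that dialed example.com:443 over TCP could attach the
+// logical peer and transport information to its span:
+//
+//	span.SetAttributes(
+//		NetTransportTCP,
+//		NetPeerName("example.com"),
+//		NetPeerPort(443),
+//	)
+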
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
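+// Illustrative sketch (not part of the generated conventions): a server span
+// for an authenticated request could record the caller extracted from the
+// access token (values here are assumptions for the example):
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)
+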
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
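+// Illustrative sketch (not part of the generated conventions): instrumentation
+// can point a span at the code that produced it; the location could come from
+// runtime.Caller, but fixed values are shown here for brevity:
+//
+//	span.SetAttributes(
+//		CodeFunction("serveRequest"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)
+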
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
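+// Illustrative sketch (not part of the generated conventions): attributes
+// shared by HTTP client and server spans, assuming a completed request whose
+// response carried a Content-Length of 3495 bytes:
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPStatusCode(200),
+//		HTTPFlavorHTTP11,
+//		HTTPResponseContentLength(3495),
+//	)
+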
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by
+ // the HTTP server framework as the route attribute should have
+ // low-cardinality and the URI path can NOT substitute it.
+ HTTPRouteKey = attribute.Key("http.route")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would
+ // identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr`, is available even if that
+ // other
+ // source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
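+// Illustrative sketch (not part of the generated conventions): a server span
+// produced by a router that matched a templated path could be annotated as
+// follows (the values are assumptions for the example):
+//
+//	span.SetAttributes(
+//		HTTPScheme("https"),
+//		HTTPTarget("/path/12314/?q=ddds"),
+//		HTTPRoute("/users/:userID?"),
+//		HTTPClientIP("83.164.160.102"),
+//	)
+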
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
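As a hedged usage sketch for reviewers: the DynamoDB helpers above are meant to be set on client spans around DynamoDB calls. The semconv import path, tracer name, and attribute values below are illustrative assumptions, not part of this change.

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery shows one way the helpers could annotate a span that
// wraps a DynamoDB Query call.
func annotateDynamoDBQuery(ctx context.Context) {
	tracer := otel.Tracer("example/dynamodb")
	_, span := tracer.Start(ctx, "DynamoDB.Query",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.AWSDynamoDBTableNames("Users"),
			semconv.AWSDynamoDBIndexName("name_to_group"),
			semconv.AWSDynamoDBLimit(10),
			semconv.AWSDynamoDBConsistentRead(true),
			semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
		),
	)
	defer span.End()
}
```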
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
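A similar sketch for the GraphQL attributes, again with an assumed semconv import path and illustrative values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateGraphQL sketches a server span for a GraphQL query execution.
func annotateGraphQL(ctx context.Context) {
	tracer := otel.Tracer("example/graphql")
	_, span := tracer.Start(ctx, "query findBookByID", trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	span.SetAttributes(
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	)
}
```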
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such a notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationKindKey is the attribute Key conforming to the
+ // "messaging.destination.kind" semantic conventions. It represents the
+ // kind of message destination
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such a notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceKindKey is the attribute Key conforming to the
+ // "messaging.source.kind" semantic conventions. It represents the kind of
+ // message source
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+ // A message received from a queue
+ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+ // A message received from a topic
+ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
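To see how the general, per-message, and destination attributes above combine on a single span, here is a hedged producer-span sketch; the semconv import path, broker name, and values are assumptions for illustration only.

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotatePublish sketches a producer span for a message sent to a queue.
func annotatePublish(ctx context.Context) {
	tracer := otel.Tracer("example/messaging")
	_, span := tracer.Start(ctx, "MyQueue publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystem("rabbitmq"),
			semconv.MessagingOperationPublish,
			semconv.MessagingDestinationName("MyQueue"),
			semconv.MessagingDestinationKindQueue,
			semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
			semconv.MessagingMessagePayloadSizeBytes(2738),
		),
	)
	defer span.End()
}
```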
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
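A hedged consumer-side sketch tying the Kafka-specific attributes above to the general messaging ones (import path, topic, and values assumed):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateKafkaReceive sketches a consumer span for a record received from Kafka.
func annotateKafkaReceive(ctx context.Context) {
	tracer := otel.Tracer("example/kafka")
	_, span := tracer.Start(ctx, "my-topic receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSystem("kafka"),
			semconv.MessagingOperationReceive,
			semconv.MessagingSourceName("my-topic"),
			semconv.MessagingKafkaConsumerGroup("my-group"),
			semconv.MessagingKafkaClientID("client-5"),
			semconv.MessagingKafkaSourcePartition(2),
			semconv.MessagingKafkaMessageOffset(42),
		),
	)
	defer span.End()
}
```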
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
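For completeness, the RocketMQ attributes follow the same pattern; a minimal sketch of a FIFO producer span, with an assumed import path and illustrative values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateRocketMQSend sketches a producer span for a FIFO RocketMQ message.
func annotateRocketMQSend(ctx context.Context) {
	tracer := otel.Tracer("example/rocketmq")
	_, span := tracer.Start(ctx, "myTopic publish", trace.WithSpanKind(trace.SpanKindProducer))
	defer span.End()

	span.SetAttributes(
		semconv.MessagingSystem("rocketmq"),
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myProducerGroup"),
		semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
		semconv.MessagingRocketmqMessageTypeFifo,
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
		semconv.MessagingRocketmqMessageTag("tagA"),
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
	)
}
```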
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
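A hedged server-span sketch combining the RPC attributes with the gRPC status enum above (import path, service, and method are assumptions):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateGRPCServer sketches a server span for a unary gRPC call that succeeded.
func annotateGRPCServer(ctx context.Context) {
	tracer := otel.Tracer("example/grpc")
	_, span := tracer.Start(ctx, "myservice.EchoService/Echo",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.RPCSystemGRPC,
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("Echo"),
		),
	)
	defer span.End()

	// The status code is typically only known once the handler has returned.
	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)
}
```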
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
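Finally, a hedged client-span sketch for the JSON-RPC attributes; no `rpc.system` enum member for JSON-RPC appears in this hunk, so the raw key is used, and the import path and values are illustrative assumptions:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateJSONRPCError sketches a client span for a JSON-RPC 2.0 call that
// returned an error response.
func annotateJSONRPCError(ctx context.Context) {
	tracer := otel.Tracer("example/jsonrpc")
	_, span := tracer.Start(ctx, "exampleMethod", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemKey.String("jsonrpc"), // no enum constant is assumed here
		semconv.RPCMethod("exampleMethod"),
		semconv.RPCJsonrpcVersion("2.0"),
		semconv.RPCJsonrpcRequestID("10"),
		semconv.RPCJsonrpcErrorCode(-32700),
		semconv.RPCJsonrpcErrorMessage("Parse error"),
	)
}
```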
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/config.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/config.go
index f058cc781e00..cb3efbb9ad89 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -25,6 +25,7 @@ type TracerConfig struct {
instrumentationVersion string
// Schema URL of the telemetry emitted by the Tracer.
schemaURL string
+ attrs attribute.Set
}
// InstrumentationVersion returns the version of the library providing instrumentation.
@@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string {
return t.instrumentationVersion
}
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+ return t.attrs
+}
+
// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
func (t *TracerConfig) SchemaURL() string {
return t.schemaURL
@@ -307,6 +314,16 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
// WithSchemaURL sets the schema URL for the Tracer.
func WithSchemaURL(schemaURL string) TracerOption {
return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
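A brief sketch of how the new WithInstrumentationAttributes option added above could be used when obtaining a named tracer; the tracer name and attribute here are illustrative, not part of this change:

```go
package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func newTracer() trace.Tracer {
	// The attribute set is stored on the TracerConfig and exposed via
	// InstrumentationAttributes(); attribute.NewSet de-duplicates keys.
	return otel.Tracer("example/instrumentation",
		trace.WithInstrumentationVersion("0.1.0"),
		trace.WithInstrumentationAttributes(
			attribute.String("library.language", "go"),
		),
	)
}
```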
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/doc.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/doc.go
index 391417718f5a..ab0346f9664a 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the
OpenTelemetry API.
To participate in distributed traces a Span needs to be created for the
-operation being performed as part of a traced workflow. It its simplest form:
+operation being performed as part of a traced workflow. In its simplest form:
var tracer trace.Tracer
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/trace.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/trace.go
index 97f3d83855b7..4aa94f79f46a 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -364,8 +364,9 @@ type Span interface {
SpanContext() SpanContext
// SetStatus sets the status of the Span in the form of a code and a
- // description, overriding previous values set. The description is only
- // included in a status when the code is for an error.
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
SetStatus(code codes.Code, description string)
// SetName sets the Span name.
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/version.go b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/version.go
index 806db41c555a..0e8e5e023297 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/version.go
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.10.0"
+ return "1.14.0"
}
diff --git a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/versions.yaml b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/versions.yaml
index ec2ca16d270c..40df1fae4177 100644
--- a/cluster-autoscaler/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/cluster-autoscaler/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,10 +14,11 @@
module-sets:
stable-v1:
- version: v1.10.0
+ version: v1.14.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
+ - go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/jaeger
- go.opentelemetry.io/otel/example/namedtracer
@@ -34,8 +35,9 @@ module-sets:
- go.opentelemetry.io/otel/trace
- go.opentelemetry.io/otel/sdk
experimental-metrics:
- version: v0.31.0
+ version: v0.37.0
modules:
+ - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
@@ -44,15 +46,12 @@ module-sets:
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk/metric
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
+ - go.opentelemetry.io/otel/example/view
experimental-schema:
- version: v0.0.3
+ version: v0.0.4
modules:
- go.opentelemetry.io/otel/schema
- bridge:
- version: v0.31.0
- modules:
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/example/opencensus
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/.gitignore b/cluster-autoscaler/vendor/go.uber.org/atomic/.gitignore
index c3fa253893f0..2e337a0ed529 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/.gitignore
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/.gitignore
@@ -10,3 +10,6 @@ lint.log
# Profiling output
*.prof
+
+# Output of fossa analyzer
+/fossa
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml b/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml
deleted file mode 100644
index 13d0a4f25404..000000000000
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/atomic
-
-env:
- global:
- - GO111MODULE=on
-
-matrix:
- include:
- - go: oldstable
- - go: stable
- env: LINT=1
-
-cache:
- directories:
- - vendor
-
-before_install:
- - go version
-
-script:
- - test -z "$LINT" || make lint
- - make cover
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/CHANGELOG.md b/cluster-autoscaler/vendor/go.uber.org/atomic/CHANGELOG.md
index 24c0274dc321..5fe03f21bd3d 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,37 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+ and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+ type. This is present only for Go 1.18 or higher, and is a drop-in
+ replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+ `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
+## [1.9.0] - 2021-07-15
+### Added
+- Add `Float64.Swap` to match int atomic operations.
+- Add `atomic.Time` type for atomic operations on `time.Time` values.
+
+[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2021-06-09
+### Added
+- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
+
+[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
+
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
@@ -15,32 +46,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
causing `CAS` to fail even though the old value matches.
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
@@ -48,10 +93,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
@@ -59,18 +108,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Support new `go.uber.org/atomic` import path.
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+
## [1.0.0] - 2016-07-18
- Initial release.
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/Makefile b/cluster-autoscaler/vendor/go.uber.org/atomic/Makefile
index 1b1376d42533..46c945b32beb 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/Makefile
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/Makefile
@@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
generatenodirty:
@[ -z "$$(git status --porcelain)" ] || ( \
echo "Working tree is dirty. Commit your changes first."; \
+ git status; \
exit 1 )
@make generate
@status=$$(git status --porcelain); \
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/README.md b/cluster-autoscaler/vendor/go.uber.org/atomic/README.md
index ade0c20f16b4..96b47a1f12d3 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/README.md
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/README.md
@@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt).
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/atomic
+[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/atomic
[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/bool.go b/cluster-autoscaler/vendor/go.uber.org/atomic/bool.go
index 9cf1914b1f82..dfa2085f491d 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/bool.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,10 +36,10 @@ type Bool struct {
var _zeroBool bool
// NewBool creates a new Bool.
-func NewBool(v bool) *Bool {
+func NewBool(val bool) *Bool {
x := &Bool{}
- if v != _zeroBool {
- x.Store(v)
+ if val != _zeroBool {
+ x.Store(val)
}
return x
}
@@ -50,19 +50,26 @@ func (x *Bool) Load() bool {
}
// Store atomically stores the passed bool.
-func (x *Bool) Store(v bool) {
- x.v.Store(boolToInt(v))
+func (x *Bool) Store(val bool) {
+ x.v.Store(boolToInt(val))
}
// CAS is an atomic compare-and-swap for bool values.
-func (x *Bool) CAS(o, n bool) bool {
- return x.v.CAS(boolToInt(o), boolToInt(n))
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Bool) CAS(old, new bool) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
}
// Swap atomically stores the given bool and returns the old
// value.
-func (x *Bool) Swap(o bool) bool {
- return truthy(x.v.Swap(boolToInt(o)))
+func (x *Bool) Swap(val bool) (old bool) {
+ return truthy(x.v.Swap(boolToInt(val)))
}
// MarshalJSON encodes the wrapped bool into JSON.
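A small migration sketch for the deprecation above, assuming go.uber.org/atomic v1.10.0 as vendored here; `CompareAndSwap` keeps the same semantics and return value as the deprecated `CAS`:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	ready := atomic.NewBool(false)

	// Old (deprecated): ready.CAS(false, true)
	// New: CompareAndSwap behaves identically.
	if ready.CompareAndSwap(false, true) {
		fmt.Println("flipped to", ready.Load()) // flipped to true
	}
}
```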
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/bool_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/bool_ext.go
index c7bf7a827a81..a2e60e987390 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/bool_ext.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/bool_ext.go
@@ -38,7 +38,7 @@ func boolToInt(b bool) uint32 {
}
// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() bool {
+func (b *Bool) Toggle() (old bool) {
for {
old := b.Load()
if b.CAS(old, !old) {
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/duration.go b/cluster-autoscaler/vendor/go.uber.org/atomic/duration.go
index 027cfcb20bf5..6f4157445cfb 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/duration.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/duration.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Duration struct {
var _zeroDuration time.Duration
// NewDuration creates a new Duration.
-func NewDuration(v time.Duration) *Duration {
+func NewDuration(val time.Duration) *Duration {
x := &Duration{}
- if v != _zeroDuration {
- x.Store(v)
+ if val != _zeroDuration {
+ x.Store(val)
}
return x
}
@@ -51,19 +51,26 @@ func (x *Duration) Load() time.Duration {
}
// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(v time.Duration) {
- x.v.Store(int64(v))
+func (x *Duration) Store(val time.Duration) {
+ x.v.Store(int64(val))
}
// CAS is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CAS(o, n time.Duration) bool {
- return x.v.CAS(int64(o), int64(n))
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
+ return x.v.CompareAndSwap(int64(old), int64(new))
}
// Swap atomically stores the given time.Duration and returns the old
// value.
-func (x *Duration) Swap(o time.Duration) time.Duration {
- return time.Duration(x.v.Swap(int64(o)))
+func (x *Duration) Swap(val time.Duration) (old time.Duration) {
+ return time.Duration(x.v.Swap(int64(val)))
}
// MarshalJSON encodes the wrapped time.Duration into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/duration_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/duration_ext.go
index 6273b66bd659..4c18b0a9ed42 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/duration_ext.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/duration_ext.go
@@ -25,13 +25,13 @@ import "time"
//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(n time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(n)))
+func (d *Duration) Add(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(delta)))
}
// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(n time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(n)))
+func (d *Duration) Sub(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(delta)))
}
// String encodes the wrapped value as a string.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/error.go b/cluster-autoscaler/vendor/go.uber.org/atomic/error.go
index a6166fbea01e..27b23ea16282 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/error.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/error.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type Error struct {
var _zeroError error
// NewError creates a new Error.
-func NewError(v error) *Error {
+func NewError(val error) *Error {
x := &Error{}
- if v != _zeroError {
- x.Store(v)
+ if val != _zeroError {
+ x.Store(val)
}
return x
}
@@ -46,6 +46,17 @@ func (x *Error) Load() error {
}
// Store atomically stores the passed error.
-func (x *Error) Store(v error) {
- x.v.Store(packError(v))
+func (x *Error) Store(val error) {
+ x.v.Store(packError(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for error values.
+func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
+ return x.v.CompareAndSwap(packError(old), packError(new))
+}
+
+// Swap atomically stores the given error and returns the old
+// value.
+func (x *Error) Swap(val error) (old error) {
+ return unpackError(x.v.Swap(packError(val)))
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/error_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/error_ext.go
index ffe0be21cb01..d31fb633bb63 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/error_ext.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/error_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,7 @@ package atomic
// atomic.Value panics on nil inputs, or if the underlying type changes.
// Stabilize by always storing a custom struct that we control.
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
type packedError struct{ Value error }
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/float32.go b/cluster-autoscaler/vendor/go.uber.org/atomic/float32.go
new file mode 100644
index 000000000000..5d535a6d2acf
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/float32.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float32 is an atomic type-safe wrapper for float32 values.
+type Float32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroFloat32 float32
+
+// NewFloat32 creates a new Float32.
+func NewFloat32(val float32) *Float32 {
+ x := &Float32{}
+ if val != _zeroFloat32 {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float32.
+func (x *Float32) Load() float32 {
+ return math.Float32frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float32.
+func (x *Float32) Store(val float32) {
+ x.v.Store(math.Float32bits(val))
+}
+
+// Swap atomically stores the given float32 and returns the old
+// value.
+func (x *Float32) Swap(val float32) (old float32) {
+ return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float32 into JSON.
+func (x *Float32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float32 from JSON.
+func (x *Float32) UnmarshalJSON(b []byte) error {
+ var v float32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/float32_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/float32_ext.go
new file mode 100644
index 000000000000..b0cd8d9c820a
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/float32_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, then the above would loop forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/float64.go b/cluster-autoscaler/vendor/go.uber.org/atomic/float64.go
index 0719060207da..11d5189a5f29 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/float64.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Float64 struct {
var _zeroFloat64 float64
// NewFloat64 creates a new Float64.
-func NewFloat64(v float64) *Float64 {
+func NewFloat64(val float64) *Float64 {
x := &Float64{}
- if v != _zeroFloat64 {
- x.Store(v)
+ if val != _zeroFloat64 {
+ x.Store(val)
}
return x
}
@@ -51,13 +51,14 @@ func (x *Float64) Load() float64 {
}
// Store atomically stores the passed float64.
-func (x *Float64) Store(v float64) {
- x.v.Store(math.Float64bits(v))
+func (x *Float64) Store(val float64) {
+ x.v.Store(math.Float64bits(val))
}
-// CAS is an atomic compare-and-swap for float64 values.
-func (x *Float64) CAS(o, n float64) bool {
- return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+// Swap atomically stores the given float64 and returns the old
+// value.
+func (x *Float64) Swap(val float64) (old float64) {
+ return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
}
// MarshalJSON encodes the wrapped float64 into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/float64_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/float64_ext.go
index 927b1add74e5..48c52b0abf66 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/float64_ext.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/float64_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,15 +20,18 @@
package atomic
-import "strconv"
+import (
+ "math"
+ "strconv"
+)
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(s float64) float64 {
+func (f *Float64) Add(delta float64) float64 {
for {
old := f.Load()
- new := old + s
+ new := old + delta
if f.CAS(old, new) {
return new
}
@@ -36,8 +39,34 @@ func (f *Float64) Add(s float64) float64 {
}
// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(s float64) float64 {
- return f.Add(-s)
+func (f *Float64) Sub(delta float64) float64 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float64) CAS(old, new float64) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float64 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, then the above would loop forever.
+func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}
// String encodes the wrapped value as a string.
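The retry loop described in the comment above, written out as a runnable sketch (go.uber.org/atomic v1.10.0 assumed; the update function is arbitrary):

```go
package main

import (
	"fmt"
	"math"

	"go.uber.org/atomic"
)

func main() {
	f := atomic.NewFloat64(2.0)

	// Apply an arbitrary update function with a CompareAndSwap retry loop.
	for {
		old := f.Load()
		updated := math.Sqrt(old)
		if f.CompareAndSwap(old, updated) {
			break
		}
	}

	fmt.Println(f.Load()) // ~1.4142135623730951
}
```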
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/gen.go b/cluster-autoscaler/vendor/go.uber.org/atomic/gen.go
index 50d6b248588f..1e9ef4f879c3 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/gen.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/gen.go
@@ -24,3 +24,4 @@ package atomic
//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
+//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/int32.go b/cluster-autoscaler/vendor/go.uber.org/atomic/int32.go
index 18ae56493ee9..b9a68f42ca84 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/int32.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int32 struct {
}
// NewInt32 creates a new Int32.
-func NewInt32(i int32) *Int32 {
- return &Int32{v: i}
+func NewInt32(val int32) *Int32 {
+ return &Int32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int32) Load() int32 {
}
// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(n int32) int32 {
- return atomic.AddInt32(&i.v, n)
+func (i *Int32) Add(delta int32) int32 {
+ return atomic.AddInt32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(n int32) int32 {
- return atomic.AddInt32(&i.v, -n)
+func (i *Int32) Sub(delta int32) int32 {
+ return atomic.AddInt32(&i.v, -delta)
}
// Inc atomically increments the wrapped int32 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Int32) Dec() int32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int32) CAS(old, new int32) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int32) CAS(old, new int32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
return atomic.CompareAndSwapInt32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int32) Store(n int32) {
- atomic.StoreInt32(&i.v, n)
+func (i *Int32) Store(val int32) {
+ atomic.StoreInt32(&i.v, val)
}
// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(n int32) int32 {
- return atomic.SwapInt32(&i.v, n)
+func (i *Int32) Swap(val int32) (old int32) {
+ return atomic.SwapInt32(&i.v, val)
}
// MarshalJSON encodes the wrapped int32 into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/int64.go b/cluster-autoscaler/vendor/go.uber.org/atomic/int64.go
index 2bcbbfaa9532..78d260976fcf 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/int64.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int64 struct {
}
// NewInt64 creates a new Int64.
-func NewInt64(i int64) *Int64 {
- return &Int64{v: i}
+func NewInt64(val int64) *Int64 {
+ return &Int64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int64) Load() int64 {
}
// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(n int64) int64 {
- return atomic.AddInt64(&i.v, n)
+func (i *Int64) Add(delta int64) int64 {
+ return atomic.AddInt64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(n int64) int64 {
- return atomic.AddInt64(&i.v, -n)
+func (i *Int64) Sub(delta int64) int64 {
+ return atomic.AddInt64(&i.v, -delta)
}
// Inc atomically increments the wrapped int64 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Int64) Dec() int64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int64) CAS(old, new int64) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int64) CAS(old, new int64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
return atomic.CompareAndSwapInt64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int64) Store(n int64) {
- atomic.StoreInt64(&i.v, n)
+func (i *Int64) Store(val int64) {
+ atomic.StoreInt64(&i.v, val)
}
// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(n int64) int64 {
- return atomic.SwapInt64(&i.v, n)
+func (i *Int64) Swap(val int64) (old int64) {
+ return atomic.SwapInt64(&i.v, val)
}
// MarshalJSON encodes the wrapped int64 into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/nocmp.go b/cluster-autoscaler/vendor/go.uber.org/atomic/nocmp.go
index a8201cb4a18e..54b74174abdf 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/nocmp.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/nocmp.go
@@ -23,13 +23,13 @@ package atomic
// nocmp is an uncomparable struct. Embed this inside another struct to make
// it uncomparable.
//
-// type Foo struct {
-// nocmp
-// // ...
-// }
+// type Foo struct {
+// nocmp
+// // ...
+// }
//
// This DOES NOT:
//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
type nocmp [0]func()
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go118.go b/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go118.go
new file mode 100644
index 000000000000..e0f47dba4686
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go118.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18 && !go1.19
+// +build go1.18,!go1.19
+
+package atomic
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p UnsafePointer
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(unsafe.Pointer(v))
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return (*T)(p.p.Load())
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(unsafe.Pointer(val))
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return (*T)(p.p.Swap(unsafe.Pointer(val)))
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go119.go b/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go119.go
new file mode 100644
index 000000000000..6726f17ad64f
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/pointer_go119.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.19
+// +build go1.19
+
+package atomic
+
+import "sync/atomic"
+
+// Pointer is an atomic pointer of type *T.
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p atomic.Pointer[T]
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(v)
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return p.p.Load()
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(val)
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return p.p.Swap(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(old, new)
+}
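A minimal sketch of the generic pointer type introduced above, assuming Go 1.18+ and go.uber.org/atomic v1.10.0; the config struct and endpoints are illustrative:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

type config struct {
	Endpoint string
}

func main() {
	// Atomically publish and replace a *config without locks.
	cfg := atomic.NewPointer(&config{Endpoint: "https://old.example.invalid"})

	old := cfg.Swap(&config{Endpoint: "https://new.example.invalid"})
	fmt.Println(old.Endpoint, "->", cfg.Load().Endpoint)
}
```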
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/string.go b/cluster-autoscaler/vendor/go.uber.org/atomic/string.go
index 225b7a2be0aa..c4bea70f4ddf 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/string.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/string.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type String struct {
var _zeroString string
// NewString creates a new String.
-func NewString(v string) *String {
+func NewString(val string) *String {
x := &String{}
- if v != _zeroString {
- x.Store(v)
+ if val != _zeroString {
+ x.Store(val)
}
return x
}
@@ -49,6 +49,17 @@ func (x *String) Load() string {
}
// Store atomically stores the passed string.
-func (x *String) Store(v string) {
- x.v.Store(v)
+func (x *String) Store(val string) {
+ x.v.Store(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for string values.
+func (x *String) CompareAndSwap(old, new string) (swapped bool) {
+ return x.v.CompareAndSwap(old, new)
+}
+
+// Swap atomically stores the given string and returns the old
+// value.
+func (x *String) Swap(val string) (old string) {
+ return x.v.Swap(val).(string)
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/string_ext.go b/cluster-autoscaler/vendor/go.uber.org/atomic/string_ext.go
index 3a9558213d0d..1f63dfd5b978 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/string_ext.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/string_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,7 +20,7 @@
package atomic
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
// String returns the wrapped value.
func (s *String) String() string {
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/time.go b/cluster-autoscaler/vendor/go.uber.org/atomic/time.go
new file mode 100644
index 000000000000..1660feb14268
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/time.go
@@ -0,0 +1,55 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "time"
+)
+
+// Time is an atomic type-safe wrapper for time.Time values.
+type Time struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroTime time.Time
+
+// NewTime creates a new Time.
+func NewTime(val time.Time) *Time {
+ x := &Time{}
+ if val != _zeroTime {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Time.
+func (x *Time) Load() time.Time {
+ return unpackTime(x.v.Load())
+}
+
+// Store atomically stores the passed time.Time.
+func (x *Time) Store(val time.Time) {
+ x.v.Store(packTime(val))
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go b/cluster-autoscaler/vendor/go.uber.org/atomic/time_ext.go
similarity index 73%
rename from cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go
rename to cluster-autoscaler/vendor/go.uber.org/atomic/time_ext.go
index d3ab9af933ee..1e3dc978aa55 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/global_prego112.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/time_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,19 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build !go1.12
+package atomic
-package zap
+import "time"
-const _stdLogDefaultDepth = 2
+//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
+
+func packTime(t time.Time) interface{} {
+ return t
+}
+
+func unpackTime(v interface{}) time.Time {
+ if t, ok := v.(time.Time); ok {
+ return t
+ }
+ return time.Time{}
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/uint32.go b/cluster-autoscaler/vendor/go.uber.org/atomic/uint32.go
index a973aba1a60b..d6f04a96dc38 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/uint32.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/uint32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint32 struct {
}
// NewUint32 creates a new Uint32.
-func NewUint32(i uint32) *Uint32 {
- return &Uint32{v: i}
+func NewUint32(val uint32) *Uint32 {
+ return &Uint32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint32) Load() uint32 {
}
// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(n uint32) uint32 {
- return atomic.AddUint32(&i.v, n)
+func (i *Uint32) Add(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(n uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(n - 1))
+func (i *Uint32) Sub(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint32 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Uint32) Dec() uint32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint32) CAS(old, new uint32) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint32) CAS(old, new uint32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
return atomic.CompareAndSwapUint32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint32) Store(n uint32) {
- atomic.StoreUint32(&i.v, n)
+func (i *Uint32) Store(val uint32) {
+ atomic.StoreUint32(&i.v, val)
}
// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(n uint32) uint32 {
- return atomic.SwapUint32(&i.v, n)
+func (i *Uint32) Swap(val uint32) (old uint32) {
+ return atomic.SwapUint32(&i.v, val)
}
// MarshalJSON encodes the wrapped uint32 into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/uint64.go b/cluster-autoscaler/vendor/go.uber.org/atomic/uint64.go
index 3b6c71fd5a37..2574bdd5ec4a 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/uint64.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/uint64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint64 struct {
}
// NewUint64 creates a new Uint64.
-func NewUint64(i uint64) *Uint64 {
- return &Uint64{v: i}
+func NewUint64(val uint64) *Uint64 {
+ return &Uint64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint64) Load() uint64 {
}
// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(n uint64) uint64 {
- return atomic.AddUint64(&i.v, n)
+func (i *Uint64) Add(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(n uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(n - 1))
+func (i *Uint64) Sub(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint64 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Uint64) Dec() uint64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint64) CAS(old, new uint64) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint64) CAS(old, new uint64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
return atomic.CompareAndSwapUint64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint64) Store(n uint64) {
- atomic.StoreUint64(&i.v, n)
+func (i *Uint64) Store(val uint64) {
+ atomic.StoreUint64(&i.v, val)
}
// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(n uint64) uint64 {
- return atomic.SwapUint64(&i.v, n)
+func (i *Uint64) Swap(val uint64) (old uint64) {
+ return atomic.SwapUint64(&i.v, val)
}
// MarshalJSON encodes the wrapped uint64 into JSON.
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/uintptr.go b/cluster-autoscaler/vendor/go.uber.org/atomic/uintptr.go
new file mode 100644
index 000000000000..81b275a7ad5d
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/uintptr.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uintptr is an atomic wrapper around uintptr.
+type Uintptr struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uintptr
+}
+
+// NewUintptr creates a new Uintptr.
+func NewUintptr(val uintptr) *Uintptr {
+ return &Uintptr{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uintptr) Load() uintptr {
+ return atomic.LoadUintptr(&i.v)
+}
+
+// Add atomically adds to the wrapped uintptr and returns the new value.
+func (i *Uintptr) Add(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uintptr and returns the new value.
+func (i *Uintptr) Sub(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uintptr and returns the new value.
+func (i *Uintptr) Inc() uintptr {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uintptr and returns the new value.
+func (i *Uintptr) Dec() uintptr {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return atomic.CompareAndSwapUintptr(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uintptr) Store(val uintptr) {
+ atomic.StoreUintptr(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uintptr and returns the old value.
+func (i *Uintptr) Swap(val uintptr) (old uintptr) {
+ return atomic.SwapUintptr(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uintptr into JSON.
+func (i *Uintptr) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uintptr.
+func (i *Uintptr) UnmarshalJSON(b []byte) error {
+ var v uintptr
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uintptr) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/unsafe_pointer.go b/cluster-autoscaler/vendor/go.uber.org/atomic/unsafe_pointer.go
new file mode 100644
index 000000000000..34868baf6a85
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/unsafe_pointer.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2021-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// UnsafePointer is an atomic wrapper around unsafe.Pointer.
+type UnsafePointer struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v unsafe.Pointer
+}
+
+// NewUnsafePointer creates a new UnsafePointer.
+func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
+ return &UnsafePointer{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (p *UnsafePointer) Load() unsafe.Pointer {
+ return atomic.LoadPointer(&p.v)
+}
+
+// Store atomically stores the passed value.
+func (p *UnsafePointer) Store(val unsafe.Pointer) {
+ atomic.StorePointer(&p.v, val)
+}
+
+// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
+func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
+ return atomic.SwapPointer(&p.v, val)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+ return p.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
+ return atomic.CompareAndSwapPointer(&p.v, old, new)
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/atomic/value.go b/cluster-autoscaler/vendor/go.uber.org/atomic/value.go
index 671f3a382475..52caedb9a58f 100644
--- a/cluster-autoscaler/vendor/go.uber.org/atomic/value.go
+++ b/cluster-autoscaler/vendor/go.uber.org/atomic/value.go
@@ -25,7 +25,7 @@ import "sync/atomic"
// Value shadows the type of the same name from sync/atomic
// https://godoc.org/sync/atomic#Value
type Value struct {
- atomic.Value
-
_ nocmp // disallow non-atomic comparison
+
+ atomic.Value
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml b/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml
deleted file mode 100644
index 8636ab42ad14..000000000000
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/multierr
-
-env:
- global:
- - GO111MODULE=on
-
-go:
- - oldstable
- - stable
-
-before_install:
-- go version
-
-script:
-- |
- set -e
- make lint
- make cover
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/CHANGELOG.md b/cluster-autoscaler/vendor/go.uber.org/multierr/CHANGELOG.md
index 6f1db9ef4a0a..f8177b978ca3 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,41 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements the multiple-error
+ interface.
+- Add `Every` function to allow checking if all errors in the chain
+ satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
v1.6.0 (2020-09-14)
===================
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/LICENSE.txt b/cluster-autoscaler/vendor/go.uber.org/multierr/LICENSE.txt
index 858e02475f16..413e30f7ce21 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/LICENSE.txt
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (c) 2017-2021 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/Makefile b/cluster-autoscaler/vendor/go.uber.org/multierr/Makefile
index 316004400b89..dcb6fe723c05 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/Makefile
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/Makefile
@@ -34,9 +34,5 @@ lint: gofmt golint staticcheck
.PHONY: cover
cover:
- go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
go tool cover -html=cover.out -o cover.html
-
-update-license:
- @cd tools && go install go.uber.org/tools/update-license
- @$(GOBIN)/update-license $(GO_FILES)
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/README.md b/cluster-autoscaler/vendor/go.uber.org/multierr/README.md
index 751bd65e5811..5ab6ac40f404 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/README.md
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
## Status
@@ -15,9 +35,9 @@ Stable: No breaking changes will be made before 2.0.
Released under the [MIT License].
[MIT License]: LICENSE.txt
-[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
-[doc]: https://godoc.org/go.uber.org/multierr
-[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://travis-ci.com/uber-go/multierr
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/error.go b/cluster-autoscaler/vendor/go.uber.org/multierr/error.go
index 5c9b67d5379e..3a828b2dff8c 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/error.go
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,54 +20,109 @@
// Package multierr allows combining one or more errors together.
//
-// Overview
+// # Overview
//
// Errors can be combined with the use of the Combine function.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
-// err = multierr.Append(reader.Close(), writer.Close())
-//
-// This makes it possible to record resource cleanup failures from deferred
-// blocks with the help of named return values.
-//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires introduction of a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
//
-// Advanced Usage
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
@@ -76,23 +131,23 @@
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
package multierr // import "go.uber.org/multierr"
import (
"bytes"
+ "errors"
"fmt"
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -132,34 +187,15 @@ type errorGroup interface {
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- errors := eg.Errors()
- result := make([]error, len(errors))
- copy(result, errors)
- return result
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -174,8 +210,6 @@ type multiError struct {
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -201,6 +235,17 @@ func (merr *multiError) Error() string {
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
@@ -292,6 +337,14 @@ func inspect(errors []error) (res inspectResult) {
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
res := inspect(errors)
switch res.Count {
case 0:
@@ -301,8 +354,12 @@ func fromSlice(errors []error) error {
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
- // already flat
- return &multiError{errors: errors}
+ // Error list is flat. Make a copy of it
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
}
}
@@ -327,32 +384,32 @@ func fromSlice(errors []error) error {
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
-// Combine(nil, nil) // == nil
+// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
-// Combine(err) // == err
+// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
@@ -362,16 +419,19 @@ func Combine(errors ...error) error {
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+//		}()
+//	}
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
@@ -401,37 +461,37 @@ func Append(left error, right error) error {
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
-// err := multierr.Append(r.Close(), w.Close())
+// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
-//
-// Compare this with a verison that relies solely on Append:
-//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
@@ -447,3 +507,140 @@ func AppendInto(into *error, err error) (errored bool) {
*into = Append(*into, err)
return true
}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and
+//	// if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+//	// Similarly, this schedules scanner.Err to be called and
+//	// inspected when the function returns, and appends its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using a function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
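
An illustrative sketch (not part of the patch) of the deferred-append helpers documented above; the file name is a placeholder, and it assumes multierr >= 1.9 so that `AppendFunc` is available alongside `AppendInvoke` and `Close`:

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// copyLines reads a hypothetical input file. The deferred AppendInvoke and
// AppendFunc calls fold Close and Err failures into the named return value.
func copyLines(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	defer multierr.AppendFunc(&err, scanner.Err)

	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	// The deferred appends still run after this return and may replace nil.
	return nil
}

func main() {
	if err := copyLines("example.txt"); err != nil {
		fmt.Println("copy failed:", err)
	}
}
```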
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/error_post_go120.go b/cluster-autoscaler/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 000000000000..a173f9c25152
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+	// Check if the given err is an unwrappable error that
+	// implements the multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
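
A small sketch (not part of the patch) of what the Go 1.20 path above enables: because multiError now exposes `Unwrap() []error`, the standard `errors` helpers and the new `Every` function can inspect a combined error directly. Assumes Go 1.20+ and multierr v1.11; the file name is illustrative and expected not to exist:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	_, openErr := os.Open("missing.txt") // wraps fs.ErrNotExist
	err := multierr.Combine(errors.New("stage one failed"), openErr)

	// errors.Is walks the Unwrap() []error list added for Go 1.20.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// Every is stricter: all wrapped errors must match the target.
	fmt.Println(multierr.Every(err, fs.ErrNotExist)) // false

	// Errors still returns the flattened list.
	fmt.Println(len(multierr.Errors(err))) // 2
}
```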
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/go113.go b/cluster-autoscaler/vendor/go.uber.org/multierr/error_pre_go120.go
similarity index 66%
rename from cluster-autoscaler/vendor/go.uber.org/multierr/go113.go
rename to cluster-autoscaler/vendor/go.uber.org/multierr/error_pre_go120.go
index 264b0eac0ddc..93872a3fcd13 100644
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/go113.go
+++ b/cluster-autoscaler/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,12 +18,19 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// +build go1.13
+//go:build !go1.20
+// +build !go1.20
package multierr
import "errors"
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
// As attempts to find the first error in the error list that matches the type
// of the value that target points to.
//
@@ -50,3 +57,23 @@ func (merr *multiError) Is(target error) bool {
}
return false
}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/multierr/glide.yaml b/cluster-autoscaler/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec242f..000000000000
--- a/cluster-autoscaler/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/.readme.tmpl b/cluster-autoscaler/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e64cf2..92aa65d660b6 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/.readme.tmpl
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/.readme.tmpl
@@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt).
In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md b/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md
index fdfef8808ab5..0db1f9f15fee 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,117 @@
# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors into `zap.Error` fields automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, and @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
## 1.19.0 (9 Aug 2021)
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/CONTRIBUTING.md b/cluster-autoscaler/vendor/go.uber.org/zap/CONTRIBUTING.md
index 5cd965687138..ea02f3cae2d6 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request.
[Fork][fork], then clone the repository:
-```
+```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@@ -27,21 +27,16 @@ git fetch upstream
Make sure that the tests and the linters pass:
-```
+```bash
make test
make lint
```
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
## Making Changes
Start by creating a new branch for your changes:
-```
+```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@@ -52,22 +47,22 @@ git checkout -b cool_new_feature
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
-```
+```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
-At this point, you're waiting on us to review your changes. We *try* to respond
+At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/README.md b/cluster-autoscaler/vendor/go.uber.org/zap/README.md
index 1e64d6cffc13..a553a428c8f6 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/README.md
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/README.md
@@ -54,7 +54,7 @@ and make many small allocations. Put differently, using `encoding/json` and
Zap takes a different approach. It includes a reflection-free, zero-allocation
JSON encoder, and the base `Logger` strives to avoid serialization overhead
and allocations wherever possible. By building the high-level `SugaredLogger`
-on that foundation, zap lets users *choose* when they need to count every
+on that foundation, zap lets users _choose_ when they need to count every
allocation and when they'd prefer a more familiar, loosely typed API.
As measured by its own [benchmarking suite][], not only is zap more performant
@@ -64,40 +64,40 @@ id="anchor-versions">[1](#footnote-versions)
Log a message and 10 fields:
-| Package | Time | Time % to zap | Objects Allocated |
-| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :---------: | :-----------: | :---------------: |
+| :zap: zap | 2900 ns/op | +0% | 5 allocs/op |
+| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op |
+| zerolog | 10639 ns/op | +267% | 32 allocs/op |
+| go-kit | 14434 ns/op | +398% | 59 allocs/op |
+| logrus | 17104 ns/op | +490% | 81 allocs/op |
+| apex/log | 32424 ns/op | +1018% | 66 allocs/op |
+| log15 | 33579 ns/op | +1058% | 76 allocs/op |
Log a message with a logger that already has 10 fields of context:
-| Package | Time | Time % to zap | Objects Allocated |
-| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :---------: | :-----------: | :---------------: |
+| :zap: zap | 373 ns/op | +0% | 0 allocs/op |
+| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op |
+| zerolog | 288 ns/op | -23% | 0 allocs/op |
+| go-kit | 11785 ns/op | +3060% | 58 allocs/op |
+| logrus | 19629 ns/op | +5162% | 70 allocs/op |
+| log15 | 21866 ns/op | +5762% | 72 allocs/op |
+| apex/log | 30890 ns/op | +8182% | 55 allocs/op |
Log a static string, without any context or `printf`-style templating:
-| Package | Time | Time % to zap | Objects Allocated |
-| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :--------: | :-----------: | :---------------: |
+| :zap: zap | 381 ns/op | +0% | 0 allocs/op |
+| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op |
+| zerolog | 369 ns/op | -3% | 0 allocs/op |
+| standard library | 385 ns/op | +1% | 2 allocs/op |
+| go-kit | 606 ns/op | +59% | 11 allocs/op |
+| logrus | 1730 ns/op | +354% | 25 allocs/op |
+| apex/log | 1998 ns/op | +424% | 7 allocs/op |
+| log15 | 4546 ns/op | +1093% | 22 allocs/op |
## Development Status: Stable
@@ -131,4 +131,3 @@ pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
-
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/array_go118.go b/cluster-autoscaler/vendor/go.uber.org/zap/array_go118.go
new file mode 100644
index 000000000000..d0d2c49d698a
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/array_go118.go
@@ -0,0 +1,156 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18
+// +build go1.18
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
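
As an aside (not part of the patch), a sketch of the generic constructors added above; the `author` type is hypothetical, and `net.IP` stands in for any value implementing `fmt.Stringer`. Assumes zap >= 1.23 built with Go 1.18+:

```go
package main

import (
	"net"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// author implements MarshalLogObject on the value receiver, so a []author
// can be passed to zap.Objects directly.
type author struct{ name string }

func (a author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.name)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	authors := []author{{name: "ada"}, {name: "grace"}}
	peers := []net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2)} // net.IP is a fmt.Stringer

	logger.Info("loaded article",
		zap.Objects("authors", authors),
		zap.Stringers("peers", peers),
	)
}
```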
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/config.go b/cluster-autoscaler/vendor/go.uber.org/zap/config.go
index 55637fb0b4b1..ee6096766a85 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/config.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/config.go
@@ -21,7 +21,7 @@
package zap
import (
- "fmt"
+ "errors"
"sort"
"time"
@@ -182,7 +182,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) {
}
if cfg.Level == (AtomicLevel{}) {
- return nil, fmt.Errorf("missing Level")
+ return nil, errors.New("missing Level")
}
log := New(
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/doc.go b/cluster-autoscaler/vendor/go.uber.org/zap/doc.go
index 8638dd1b9656..3c50d7b4d3ff 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/doc.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/doc.go
@@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
-// Choosing a Logger
+// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
-// sugar := zap.NewExample().Sugar()
-// defer sugar.Sync()
-// sugar.Infow("failed to fetch URL",
-// "url", "http://example.com",
-// "attempt", 3,
-// "backoff", time.Second,
-// )
-// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// logger.Info("failed to fetch URL",
-// zap.String("url", "http://example.com"),
-// zap.Int("attempt", 3),
-// zap.Duration("backoff", time.Second),
-// )
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// sugar := logger.Sugar()
-// plain := sugar.Desugar()
//
-// Configuring Zap
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
-// logger, err := zap.NewProduction()
-// if err != nil {
-// log.Fatalf("can't initialize zap logger: %v", err)
-// }
-// defer logger.Sync()
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
-// Extending Zap
+// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
-// Frequently Asked Questions
+// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/encoder.go
index 08ed83354360..caa04ceefd81 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/encoder.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/encoder.go
@@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
- return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/global.go b/cluster-autoscaler/vendor/go.uber.org/zap/global.go
index c1ac0507cd9b..3cb46c9e0ac5 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/global.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@ import (
)
const (
+ _stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/http_handler.go b/cluster-autoscaler/vendor/go.uber.org/zap/http_handler.go
index 1297c33b3285..632b6831a856 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/http_handler.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/http_handler.go
@@ -22,6 +22,7 @@ package zap
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -32,22 +33,23 @@ import (
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
-// GET
+// # GET
//
// The GET request returns a JSON description of the current logging level like:
-// {"level":"info"}
//
-// PUT
+// {"level":"info"}
+//
+// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
-// Content-Type: application/x-www-form-urlencoded
+// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
-// level=debug
+// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@@ -55,18 +57,17 @@ import (
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
-// curl -X PUT localhost:8080/log/level?level=debug
-// curl -X PUT localhost:8080/log/level -d level=debug
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
-// {"level":"info"}
+// {"level":"info"}
//
// An example curl request could look like this:
//
-// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
type errorResponse struct {
Error string `json:"error"`
@@ -108,7 +109,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@@ -125,7 +126,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
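
Not part of the patch, but a sketch of wiring the endpoint documented above: `Config.Level` is an `AtomicLevel`, and `AtomicLevel` implements `http.Handler`, so mounting it gives the GET/PUT behavior described. The path and port are arbitrary:

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	cfg := zap.NewProductionConfig()
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	// GET /log/level returns {"level":"info"}; PUT -d level=debug changes it live.
	http.Handle("/log/level", cfg.Level)

	logger.Info("level endpoint listening", zap.String("addr", ":8080"))
	logger.Fatal("server exited", zap.Error(http.ListenAndServe(":8080", nil)))
}
```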
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/internal/exit/exit.go b/cluster-autoscaler/vendor/go.uber.org/zap/internal/exit/exit.go
index dfc5b05feb77..f673f9947b85 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/internal/exit/exit.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -24,24 +24,25 @@ package exit
import "os"
-var real = func() { os.Exit(1) }
+var _exit = os.Exit
-// Exit normally terminates the process by calling os.Exit(1). If the package
-// is stubbed, it instead records a call in the testing spy.
-func Exit() {
- real()
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
- prev func()
+ Code int
+ prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
- s := &StubbedExit{prev: real}
- real = s.exit
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
return s
}
@@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit {
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
- real = se.prev
+ _exit = se.prev
}
-func (se *StubbedExit) exit() {
+func (se *StubbedExit) exit(code int) {
se.Exited = true
+ se.Code = code
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go b/cluster-autoscaler/vendor/go.uber.org/zap/internal/level_enabler.go
similarity index 70%
rename from cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go
rename to cluster-autoscaler/vendor/go.uber.org/zap/internal/level_enabler.go
index 6b5dbda80768..5f3e3f1b924a 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/global_go112.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,18 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build go1.12
+package internal
-package zap
+import "go.uber.org/zap/zapcore"
-const _stdLogDefaultDepth = 1
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined so it can be used more conveniently in tests and
+// non-zapcore packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/level.go b/cluster-autoscaler/vendor/go.uber.org/zap/level.go
index 3567a9a1e6a3..db951e19a50f 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/level.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/level.go
@@ -22,6 +22,7 @@ package zap
import (
"go.uber.org/atomic"
+ "go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@@ -70,6 +71,8 @@ type AtomicLevel struct {
l *atomic.Int32
}
+var _ internal.LeveledEnabler = AtomicLevel{}
+
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
@@ -86,6 +89,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
return a
}
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
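
An illustrative sketch (not part of the patch) of `ParseAtomicLevel`, which the hunk above adds; the `LOG_LEVEL` environment variable is only an example source for the level name:

```go
package main

import (
	"log"
	"os"

	"go.uber.org/zap"
)

func main() {
	name := os.Getenv("LOG_LEVEL")
	if name == "" {
		name = "info"
	}

	lvl, err := zap.ParseAtomicLevel(name)
	if err != nil {
		log.Fatalf("invalid LOG_LEVEL %q: %v", name, err)
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = lvl
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Debug("emitted only when LOG_LEVEL=debug")
	logger.Info("logger ready", zap.String("level", lvl.Level().String()))
}
```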
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/logger.go b/cluster-autoscaler/vendor/go.uber.org/zap/logger.go
index f116bd936fe2..cd44030d13f0 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/logger.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/logger.go
@@ -22,11 +22,11 @@ package zap
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
- "runtime"
"strings"
+ "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/zapcore"
)
@@ -42,7 +42,7 @@ type Logger struct {
development bool
addCaller bool
- onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@@ -85,7 +85,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
- errorOutput: zapcore.AddSync(ioutil.Discard),
+ errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@@ -107,6 +107,19 @@ func NewDevelopment(options ...Option) (*Logger, error) {
return NewDevelopmentConfig().Build(options...)
}
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@@ -170,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@@ -177,6 +197,14 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return log.check(lvl, msg)
}
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@@ -259,8 +287,10 @@ func (log *Logger) clone() *Logger {
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // check must always be called directly by a method in the Logger interface
- // (e.g., Check, Info, Fatal).
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@@ -283,18 +313,27 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, zapcore.WriteThenPanic)
case zapcore.FatalLevel:
onFatal := log.onFatal
- // Noop is the default value for CheckWriteAction, and it leads to
- // continued execution after a Fatal which is unexpected.
- if onFatal == zapcore.WriteThenNoop {
+ // nil or WriteThenNoop will lead to continued execution after
+ // a Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the
+ // log.Fatal.
+ if onFatal == nil || onFatal == zapcore.WriteThenNoop {
onFatal = zapcore.WriteThenFatal
}
- ce = ce.Should(ent, onFatal)
+ ce = ce.After(ent, onFatal)
case zapcore.DPanicLevel:
if log.development {
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, zapcore.WriteThenPanic)
}
}
@@ -307,42 +346,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
- if log.addCaller {
- frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
- if !defined {
+
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktraceFirst
+ if addStack {
+ stackDepth = stacktraceFull
+ }
+ stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync()
}
+ return ce
+ }
- ce.Entry.Caller = zapcore.EntryCaller{
- Defined: defined,
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
- if log.addStack.Enabled(ce.Entry.Level) {
- ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
- }
- return ce
-}
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
- const skipOffset = 2 // skip getCallerFrame and Callers
-
- pc := make([]uintptr, 1)
- numFrames := runtime.Callers(skip+skipOffset, pc)
- if numFrames < 1 {
- return
+ stackfmt := newStackFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
}
- frame, _ = runtime.CallersFrames(pc).Next()
- return frame, frame.PC != 0
+ return ce
}
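A short sketch combining the new Must, Log, and Level helpers added above; the address field and the debug gate are illustrative only:

	logger := zap.Must(zap.NewProduction()) // panics if construction fails
	defer logger.Sync()

	// Log accepts the level as a value, which helps when the level comes
	// from configuration rather than being fixed at the call site.
	logger.Log(zapcore.InfoLevel, "server started", zap.String("addr", ":8080"))

	if logger.Level() <= zapcore.DebugLevel {
		logger.Debug("verbose diagnostics enabled")
	}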
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/options.go b/cluster-autoscaler/vendor/go.uber.org/zap/options.go
index e9e66161f510..c4f3bca3d202 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/options.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/options.go
@@ -133,9 +133,28 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
}
// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
- log.onFatal = action
+ log.onFatal = hook
})
}
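A one-line sketch of the new option, mirroring the doc comment above; core is assumed to be an existing zapcore.Core, and WriteThenGoexit is the stock hook that stops only the current goroutine:

	// Useful in tests: a Fatal log ends the goroutine instead of the process.
	logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))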
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/sink.go b/cluster-autoscaler/vendor/go.uber.org/zap/sink.go
index df46fa87a70a..478c9a10ffc3 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/sink.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@ import (
"io"
"net/url"
"os"
+ "path/filepath"
"strings"
"sync"
@@ -34,23 +35,7 @@ import (
const schemeFile = "file"
-var (
- _sinkMutex sync.RWMutex
- _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
- resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
-
- _sinkFactories = map[string]func(*url.URL) (Sink, error){
- schemeFile: newFileSink,
- }
-}
+var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@@ -58,10 +43,6 @@ type Sink interface {
io.Closer
}
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
type errSinkNotFound struct {
scheme string
}
@@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
- if _, ok := _sinkFactories[normalized]; ok {
+ if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
- _sinkFactories[normalized] = factory
+ sr.factories[normalized] = factory
return nil
}
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile
}
- _sinkMutex.RLock()
- factory, ok := _sinkFactories[u.Scheme]
- _sinkMutex.RUnlock()
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
- switch u.Path {
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
}
func normalizeScheme(s string) (string, error) {
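A sketch of registering a custom sink against the refactored registry; the memorySink type and the "memory" scheme are illustrative names, not part of zap:

	type memorySink struct{ *bytes.Buffer }

	func (memorySink) Close() error { return nil }
	func (memorySink) Sync() error  { return nil }

	func init() {
		if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
			return memorySink{new(bytes.Buffer)}, nil
		}); err != nil {
			panic(err)
		}
	}

	// cfg := zap.NewProductionConfig()
	// cfg.OutputPaths = []string{"memory://", "stderr"}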
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/stacktrace.go b/cluster-autoscaler/vendor/go.uber.org/zap/stacktrace.go
index 0cf8c1ddffa1..817a3bde8b10 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/stacktrace.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/stacktrace.go
@@ -24,62 +24,153 @@ import (
"runtime"
"sync"
+ "go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
)
-var (
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
+var _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return &stacktrace{
+ storage: make([]uintptr, 64),
+ }
+ },
+}
+
+type stacktrace struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// stacktraceDepth specifies how deep of a stack trace should be captured.
+type stacktraceDepth int
+
+const (
+ // stacktraceFirst captures only the first frame.
+ stacktraceFirst stacktraceDepth = iota
+
+ // stacktraceFull captures the entire call stack, allocating more
+ // storage for it if needed.
+ stacktraceFull
)
-func takeStacktrace(skip int) string {
- buffer := bufferpool.Get()
- defer buffer.Free()
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var numFrames int
- for {
- // Skip the call to runtime.Callers and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- numFrames = runtime.Callers(skip+2, programCounters.pcs)
- if numFrames < len(programCounters.pcs) {
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+// captureStacktrace captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// captureStacktrace.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
+ stack := _stacktracePool.Get().(*stacktrace)
+
+ switch depth {
+ case stacktraceFirst:
+ stack.pcs = stack.storage[:1]
+ case stacktraceFull:
+ stack.pcs = stack.storage
}
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip captureStacktrace and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if i != 0 {
- buffer.AppendByte('\n')
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == stacktraceFull {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
}
- i++
- buffer.AppendString(frame.Function)
- buffer.AppendByte('\n')
- buffer.AppendByte('\t')
- buffer.AppendString(frame.File)
- buffer.AppendByte(':')
- buffer.AppendInt(int64(frame.Line))
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
}
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *stacktrace) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stacktracePool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *stacktrace) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+func takeStacktrace(skip int) string {
+ stack := captureStacktrace(skip+1, stacktraceFull)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := newStackFormatter(buffer)
+ stackfmt.FormatStack(stack)
return buffer.String()
}
-type programCounters struct {
- pcs []uintptr
+// stackFormatter formats a stack trace into a readable string representation.
+type stackFormatter struct {
+ b *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// newStackFormatter builds a new stackFormatter.
+func newStackFormatter(b *buffer.Buffer) stackFormatter {
+ return stackFormatter{b: b}
}
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *stackFormatter) FormatStack(stack *stacktrace) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/sugar.go b/cluster-autoscaler/vendor/go.uber.org/zap/sugar.go
index 0b9651981a90..ac387b3e47d1 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/sugar.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@ import (
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -38,10 +39,19 @@ const (
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes three methods: one for loosely-typed
-// structured logging, one for println-style formatting, and one for
-// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
-// output with Infow ("info with" structured context), Info, or Infof.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
@@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger {
return &SugaredLogger{base: s.base.Named(name)}
}
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
-// sugaredLogger.With(
-// "hello", "world",
-// "failure", errors.New("oh no"),
-// Stack(),
-// "count", 42,
-// "user", User{Name: "alice"},
-// )
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
// is the equivalent of
-// unsugared.With(
-// String("hello", "world"),
-// String("failure", "oh no"),
-// Stack(),
-// Int("count", 42),
-// Object("user", User{Name: "alice"}),
-// )
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@@ -92,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
// Debug uses fmt.Sprint to construct and log a message.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
@@ -168,7 +198,8 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
-// s.With(keysAndValues).Debug(msg)
+//
+// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@@ -210,11 +241,48 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Debugln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln uses fmt.Sprintln to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln uses fmt.Sprintln to construct and log a message, then panics.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
+
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
+// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@@ -228,6 +296,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf
}
}
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
// getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@@ -246,15 +326,24 @@ func getMessage(template string, fmtArgs []interface{}) string {
return fmt.Sprint(fmtArgs...)
}
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields := make([]Field, 0, len(args))
- var invalid invalidPairs
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@@ -264,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue
}
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
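A sketch of the sugared additions: the new Println-style methods and the error sweetening above, where a bare error passed without a key becomes the standard "error" field; doWork is a hypothetical helper:

	sugar := zap.Must(zap.NewProduction()).Sugar()
	defer sugar.Sync()

	sugar.Infoln("listening on", ":8080") // fmt.Sprintln-style spacing

	if err := doWork(); err != nil {
		// err has no key; it is attached as the "error" field automatically.
		sugar.Errorw("request failed", err, "attempt", 3)
	}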
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/writer.go b/cluster-autoscaler/vendor/go.uber.org/zap/writer.go
index 86a709ab0be2..f08728e1ec00 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/writer.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@ package zap
import (
"fmt"
"io"
- "io/ioutil"
"go.uber.org/zap/zapcore"
@@ -69,9 +68,9 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
var openErr error
for _, path := range paths {
- sink, err := newSink(path)
+ sink, err := _sinkRegistry.newSink(path)
if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
@@ -79,7 +78,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
}
if openErr != nil {
close()
- return writers, nil, openErr
+ return nil, nil, openErr
}
return writers, close, nil
@@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
- return zapcore.AddSync(ioutil.Discard)
+ return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
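A sketch of zap.Open, which now routes every path through the sink registry (and opens absolute Windows paths such as c:\log.txt directly as files); the log file path is an assumption:

	ws, closeOut, err := zap.Open("stderr", "/tmp/app.log")
	if err != nil {
		panic(err)
	}
	defer closeOut()

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		ws,
		zapcore.DebugLevel,
	)
	logger := zap.New(core)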
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
index ef2f7d9637bc..a40e93b3ec8f 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/clock.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/clock.go
index d2ea95b394bc..422fd82a6b0f 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/clock.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/clock.go
@@ -20,9 +20,7 @@
package zapcore
-import (
- "time"
-)
+import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/console_encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af404c5e..1aa5dc364673 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/core.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b034bb4..9dfd64051f04 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/core.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca166c64..5769ff3e4e56 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -187,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
-// timeEncoder:
-// layout: 06/01/02 03:04pm
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
// If value is string, it uses UnmarshalText.
-// timeEncoder: iso8601
+//
+// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
@@ -312,14 +316,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +335,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
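A sketch of the two new EncoderConfig knobs; the indented stdlib JSON encoder is purely illustrative (any value with an Encode(interface{}) error method satisfies ReflectedEncoder):

	cfg := zap.NewProductionEncoderConfig()
	cfg.SkipLineEnding = true // omit the trailing line ending entirely
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w) // assumption: stdlib encoder with indentation
		enc.SetIndent("", "  ")
		return enc
	}
	enc := zapcore.NewJSONEncoder(cfg)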
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/entry.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/entry.go
index 0885505b75bc..9d326e95ea25 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/entry.go
@@ -27,10 +27,9 @@ import (
"sync"
"time"
+ "go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
-
- "go.uber.org/multierr"
)
var (
@@ -152,6 +151,27 @@ type Entry struct {
Stack string
}
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
+
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@@ -164,21 +184,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
- // WriteThenFatal causes a fatal os.Exit after Write.
+ // WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
-// CheckedEntry references should be created by calling AddCore or Should on a
+// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
- should CheckWriteAction
+ after CheckWriteHook
cores []Core
}
@@ -186,7 +221,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
- ce.should = WriteThenNoop
+ ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@@ -224,17 +259,11 @@ func (ce *CheckedEntry) Write(fields ...Field) {
ce.ErrorOutput.Sync()
}
- should, msg := ce.should, ce.Message
- putCheckedEntry(ce)
-
- switch should {
- case WriteThenPanic:
- panic(msg)
- case WriteThenFatal:
- exit.Exit()
- case WriteThenGoexit:
- runtime.Goexit()
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
}
+ putCheckedEntry(ce)
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@@ -252,11 +281,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
- ce.should = should
+ ce.after = hook
return ce
}
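A sketch of a custom CheckWriteHook; the fatalHook type and its flush callback are hypothetical, and the hook deliberately calls os.Exit because hooks attached to Fatal logs are expected to stop control flow:

	type fatalHook struct{ flush func() }

	func (h fatalHook) OnWrite(_ *zapcore.CheckedEntry, _ []zapcore.Field) {
		h.flush() // e.g. flush buffered telemetry before exiting
		os.Exit(1)
	}

	// logger := zap.New(core, zap.WithFatalHook(fatalHook{flush: telemetryFlush}))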
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/error.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/error.go
index 74919b0ccb1b..06359907af41 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/error.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/error.go
@@ -36,13 +36,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
-// {
-// "error": err.Error(),
-// "errorVerbose": fmt.Sprintf("%+v", err),
-// "errorCauses": [
-// ...
-// ],
-// }
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/hook.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb302b3..198def9917ce 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/increase_level.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a1749261ab2..7a11237ae976 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d917e92c..3921c5cd333e 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,7 +22,6 @@ package zapcore
import (
"encoding/base64"
- "encoding/json"
"math"
"sync"
"time"
@@ -64,7 +63,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -72,7 +71,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
-// {"foo":"bar","foo":"baz"}
+//
+// {"foo":"bar","foo":"baz"}
+//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@@ -82,6 +83,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +130,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +145,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) {
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +158,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +220,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +245,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+ // If imaginary part is less than 0, minus (-) sign is added by default
+ // by AppendFloat.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +324,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -335,7 +366,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -396,11 +427,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +442,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/level.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f432c2..e01a2413166d 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/level.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,8 +53,62 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
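A sketch of ParseLevel and LevelOf together; enc and ws stand in for an existing encoder and write syncer:

	lvl, err := zapcore.ParseLevel("warn")
	if err != nil {
		lvl = zapcore.InfoLevel // assumed fallback
	}

	core := zapcore.NewCore(enc, ws, lvl)
	fmt.Println(zapcore.LevelOf(core)) // "warn"; InvalidLevel if nothing is enabled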
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 000000000000..8746360eca6f
--- /dev/null
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/sampler.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/sampler.go
index 31ed96e129fb..dc518055a417 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -113,12 +113,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
-// var dropped atomic.Int64
-// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-// if dec&zapcore.LogDropped > 0 {
-// dropped.Inc()
-// }
-// })
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -164,6 +175,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@@ -181,6 +197,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
@@ -200,7 +220,7 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
if ent.Level >= _minLevel && ent.Level <= _maxLevel {
counter := s.counts.get(ent.Level, ent.Message)
n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
s.hook(ent, LogDropped)
return ce
}
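
The sampler hunks above document NewSamplerWithOptions(core, tick, first, thereafter), the new thereafter == 0 "drop everything after the first N" behaviour, and the SamplerHook for observing drops. A hedged sketch of how those pieces fit together; the zap.NewExample logger and the go.uber.org/atomic counter are illustrative choices, not part of this patch.

```go
package main

import (
	"time"

	"go.uber.org/atomic"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	// Wrap an example core: in each one-second tick, the first 10 entries
	// with the same level and message pass through, then every 5th.
	// Passing thereafter == 0 would instead drop everything after the
	// first 10 in the tick, per the behaviour documented above.
	core := zapcore.NewSamplerWithOptions(
		zap.NewExample().Core(),
		time.Second, // tick
		10,          // first
		5,           // thereafter
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Inc()
			}
		}),
	)

	logger := zap.New(core)
	for i := 0; i < 100; i++ {
		logger.Info("same message")
	}
	logger.Sugar().Infof("dropped %d entries", dropped.Load())
}
```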
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/tee.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32eef9a45..9bb32f055764 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {
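
The tee hunk adds a Level method that reports the minimum (most verbose) level of the wrapped cores via LevelOf. A small sketch, assuming the vendored zapcore exposes NewTee, NewCore and LevelOf as used below.

```go
package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	out := zapcore.AddSync(os.Stdout)

	tee := zapcore.NewTee(
		zapcore.NewCore(enc, out, zapcore.DebugLevel),
		zapcore.NewCore(enc, out, zapcore.ErrorLevel),
	)

	// The Level method added above makes the tee report the most verbose
	// (minimum) level of its children, so callers can keep cheap
	// "is this level enabled?" checks.
	fmt.Println(zapcore.LevelOf(tee)) // debug
}
```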
diff --git a/cluster-autoscaler/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/cluster-autoscaler/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
index 356e12741e09..71ca30b51135 100644
--- a/cluster-autoscaler/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
+++ b/cluster-autoscaler/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
@@ -61,6 +61,7 @@ func (f optionFunc) apply(log *Logger) {
// WithDebug configures a Logger to print at zap's DebugLevel instead of
// InfoLevel.
// It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API.
+//
// Deprecated: use grpclog.SetLoggerV2() for v2 API.
func WithDebug() Option {
return optionFunc(func(logger *Logger) {
@@ -146,19 +147,22 @@ type Logger struct {
}
// Print implements grpclog.Logger.
-// Deprecated: use Info().
+//
+// Deprecated: use [Logger.Info].
func (l *Logger) Print(args ...interface{}) {
l.print.Print(args...)
}
// Printf implements grpclog.Logger.
-// Deprecated: use Infof().
+//
+// Deprecated: use [Logger.Infof].
func (l *Logger) Printf(format string, args ...interface{}) {
l.print.Printf(format, args...)
}
// Println implements grpclog.Logger.
-// Deprecated: use Info().
+//
+// Deprecated: use [Logger.Info].
func (l *Logger) Println(args ...interface{}) {
l.print.Println(args...)
}
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 401414dde2f4..3141a7f1b986 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -559,7 +559,7 @@ func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
return true
}
-// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. It is
+// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
// an error if the BIT STRING is not a whole number of bytes. It reports
// whether the read was successful.
func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
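
The hunk above only corrects the doc comment to name ReadASN1BitStringAsBytes. For context, a minimal sketch of what that method does with a DER-encoded BIT STRING that is a whole number of bytes; the input bytes are hand-built for illustration.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// DER BIT STRING: tag 0x03, length 2, zero unused bits, one content byte.
	input := cryptobyte.String([]byte{0x03, 0x02, 0x00, 0xff})

	var bits []byte
	if !input.ReadASN1BitStringAsBytes(&bits) {
		fmt.Println("malformed or non-byte-aligned BIT STRING")
		return
	}
	fmt.Printf("%x\n", bits) // ff
}
```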
diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/builder.go b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/builder.go
index 2a90c592d7c6..c05ac7d16da7 100644
--- a/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ b/cluster-autoscaler/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -303,9 +303,9 @@ func (b *Builder) add(bytes ...byte) {
b.result = append(b.result, bytes...)
}
-// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
-// child builder passed to a continuation to unwrite bytes from its parent will
-// panic.
+// Unwrite rolls back non-negative n bytes written directly to the Builder.
+// An attempt by a child builder passed to a continuation to unwrite bytes
+// from its parent will panic.
func (b *Builder) Unwrite(n int) {
if b.err != nil {
return
@@ -317,6 +317,9 @@ func (b *Builder) Unwrite(n int) {
if length < 0 {
panic("cryptobyte: internal error")
}
+ if n < 0 {
+ panic("cryptobyte: attempted to unwrite negative number of bytes")
+ }
if n > length {
panic("cryptobyte: attempted to unwrite more than was written")
}
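
The builder hunk adds an explicit panic when Unwrite is handed a negative count, alongside the existing "more than was written" check. A short sketch of normal Unwrite usage under that contract; the byte values are arbitrary.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint8(0x01)
	b.AddUint8(0x02)

	// Roll back the last byte written; n must be non-negative and no larger
	// than what has been written, otherwise the patched Unwrite panics.
	b.Unwrite(1)

	out, err := b.Bytes()
	fmt.Println(out, err) // [1] <nil>
}
```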
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/LICENSE b/cluster-autoscaler/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 000000000000..6a66aea5eafe
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/PATENTS b/cluster-autoscaler/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000000..733099041f84
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/constraints/constraints.go b/cluster-autoscaler/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000000..2c033dff47e9
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
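
The constraints package above only declares type sets. A minimal sketch of how a caller relies on constraints.Ordered; the Max helper is illustrative and not part of the package.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// Max returns the larger of two values of any ordered type; it is an
// illustrative helper, not part of the constraints package.
func Max[T constraints.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}

func main() {
	fmt.Println(Max(3, 7))            // 7
	fmt.Println(Max("apple", "pear")) // pear
}
```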
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/slices/slices.go b/cluster-autoscaler/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000000..cff0cd49ecfb
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i, vs := range s {
+ if v == vs {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i, v := range s {
+ if f(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[j:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if v != last {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if !eq(v, last) {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Make using []E instead of S
+ // to workaround a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
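
Several of the functions above (Delete, Compact, Insert) reuse the backing array where possible and return the adjusted slice, so callers must keep the return value. A short sketch exercising that contract with the vendored API; the sample data is arbitrary.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 3, 3, 3, 4}

	// Compact, Delete and Insert modify the slice in place where they can
	// and return the adjusted slice, so always keep the return value.
	s = slices.Compact(s)      // [1 2 3 4]
	s = slices.Delete(s, 1, 2) // [1 3 4]
	s = slices.Insert(s, 1, 2) // [1 2 3 4]

	fmt.Println(slices.Contains(s, 3), slices.Index(s, 4)) // true 3
	fmt.Println(slices.Clip(slices.Clone(s)))              // [1 2 3 4]
}
```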
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/slices/sort.go b/cluster-autoscaler/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000000..f14f40da712a
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if x[h] < target {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && x[i] == target
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
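
sort.go above pairs the pdqsort-based Sort/SortFunc entry points with BinarySearch, whose doc comment spells out the search invariant. A hedged usage sketch; the user struct is illustrative.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	nums := []int{5, 2, 4, 1, 3}
	slices.Sort(nums)
	i, found := slices.BinarySearch(nums, 4)
	fmt.Println(nums, i, found) // [1 2 3 4 5] 3 true

	// SortFunc needs a strict weak ordering; "<" on a single key is one.
	users := []user{{"bo", 42}, {"al", 17}}
	slices.SortFunc(users, func(a, b user) bool { return a.age < b.age })
	fmt.Println(users) // [{al 17} {bo 42}]
}
```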
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortfunc.go b/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 000000000000..2a632476c50a
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
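
The generated variants above assume the less function is a strict weak ordering, which plain "<" on float64 is not once NaN appears. A small sketch using the NaN-aware ordering suggested in the package documentation earlier in this patch.

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []float64{3, math.NaN(), 1, 2}

	// Plain "<" is not a strict weak ordering once NaN is involved, so the
	// generated pdqsort variants may misplace NaN values. This less function,
	// suggested in the package documentation, groups NaNs first.
	slices.SortFunc(xs, func(a, b float64) bool {
		return a < b || (math.IsNaN(a) && !math.IsNaN(b))
	})
	fmt.Println(xs) // [NaN 1 2 3]
}
```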
diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortordered.go b/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000000..efaa1c8b7141
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/cluster-autoscaler/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
deleted file mode 100644
index 37dc0cfdb5b0..000000000000
--- a/cluster-autoscaler/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// Do sends an HTTP request with the provided http.Client and returns
-// an HTTP response.
-//
-// If the client is nil, http.DefaultClient is used.
-//
-// The provided ctx must be non-nil. If it is canceled or times out,
-// ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
- resp, err := client.Do(req.WithContext(ctx))
- // If we got an error, and the context has been canceled,
- // the context's error is probably more useful.
- if err != nil {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- default:
- }
- }
- return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
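
With the ctxhttp helpers dropped from the vendor tree, the same context-aware request pattern is available from the standard library alone (http.NewRequestWithContext, Go 1.13+). A minimal sketch; the URL and timeout are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Equivalent of the removed ctxhttp.Get: attach the context directly to
	// the request; cancellation and timeouts behave the same way.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}
```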
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go b/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go
index 822ed42a04c1..5ff8480cf571 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go
@@ -92,6 +92,21 @@ example, to process each anchor node in depth-first order:
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser. Only the parser constructs
+a DOM according to the HTML specification, resolving malformed and misplaced
+tags where appropriate. The tokenizer simply tokenizes the HTML presented to it,
+and as such does not resolve issues that may exist in the processed HTML,
+producing a literal interpretation of the input.
+
+If your use case requires semantically well-formed HTML, as defined by the
+WHATWG specification, the parser should be used rather than the tokenizer.
*/
package html // import "golang.org/x/net/html"
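
The new Security Considerations section distinguishes the spec-conformant parser from the literal tokenizer. A short sketch of that difference on a malformed fragment, using the package's exported Parse and NewTokenizer; the input string is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const untrusted = `<p><b>hi<p>there`

	// The parser builds a DOM and repairs misnested or missing tags per the
	// HTML specification, synthesizing html/head/body elements.
	doc, err := html.Parse(strings.NewReader(untrusted))
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.FirstChild.Data) // html

	// The tokenizer reports tokens literally and never fixes structure.
	z := html.NewTokenizer(strings.NewReader(untrusted))
	for z.Next() != html.ErrorToken {
		fmt.Printf("%v\n", z.Token())
	}
}
```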
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/escape.go b/cluster-autoscaler/vendor/golang.org/x/net/html/escape.go
index d8561396200e..04c6bec21073 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/html/escape.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/html/escape.go
@@ -193,6 +193,87 @@ func lower(b []byte) []byte {
return b
}
+// escapeComment is like func escape but escapes its input bytes less often.
+// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
+// meaningful and (2) contain angle brackets that we'd like to avoid escaping
+// unless we have to.
+//
+// "We have to" includes the '&' byte, since that introduces other escapes.
+//
+// It also includes those bytes (not including EOF) that would otherwise end
+// the comment. Per the summary table at the bottom of comment_test.go, this is
+// the '>' byte that, per above, we'd like to avoid escaping unless we have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+// - The '>' is after a '!' or '-' (in the unescaped data) or
+// - The '>' is at the start of the comment data (after the opening "<!--").
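
The doc comment above describes the escaping rule: '&' is always escaped, while '>' is escaped only at the start of the comment data or directly after '!' or '-'. The vendored escapeComment itself is unexported and its body is not visible at this point in the patch, so the following is only a standalone re-statement of that rule, not the library's implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// escapeCommentData sketches the documented rule: '&' is always escaped,
// while '>' is escaped only at the start of the comment data or directly
// after '!' or '-' in the unescaped input.
func escapeCommentData(s string) string {
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '&':
			b.WriteString("&amp;")
		case '>':
			if i == 0 || s[i-1] == '!' || s[i-1] == '-' {
				b.WriteString("&gt;")
			} else {
				b.WriteByte(c)
			}
		default:
			b.WriteByte(c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeCommentData("> if a->b then a!>c else a>d"))
	// Output: &gt; if a-&gt;b then a!&gt;c else a>d
}
```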
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/token.go b/cluster-autoscaler/vendor/golang.org/x/net/html/token.go
index ae24a6fdf4a4..5c2a1f4efa55 100644
--- a/cluster-autoscaler/vendor/golang.org/x/net/html/token.go
+++ b/cluster-autoscaler/vendor/golang.org/x/net/html/token.go
@@ -110,7 +110,7 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
- return "<!--" + t.Data + "-->"
+ return "<!--" + escapeCommentString(t.Data) + "-->"
case DoctypeToken:
return "<!DOCTYPE " + t.Data + ">"
}
@@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
return
+ } else if c == '-' {
+ dashCount = 1
+ beginning = false
+ continue
}
}
}
@@ -645,6 +649,35 @@ func (z *Tokenizer) readComment() {
}
}
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+ raw := z.Raw()
+ const prefixLen = len("<!--")